xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c (revision cd3e933325e68e23516a196a8fea7f49b1e497c3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2000 to 2010, LSI Corporation.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms of all code within
31  * this file that is exclusively owned by LSI, with or without
32  * modification, is permitted provided that, in addition to the CDDL 1.0
33  * License requirements, the following conditions are met:
34  *
35  *    Neither the name of the author nor the names of its contributors may be
36  *    used to endorse or promote products derived from this software without
37  *    specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
40  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
41  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
42  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
43  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
44  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
45  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
46  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
47  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
48  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
49  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
50  * DAMAGE.
51  */
52 
53 /*
54  * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
55  *
56  */
57 
58 #if defined(lint) || defined(DEBUG)
59 #define	MPTSAS_DEBUG
60 #endif
61 
62 /*
63  * standard header files.
64  */
65 #include <sys/note.h>
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/file.h>
69 #include <sys/policy.h>
70 #include <sys/sysevent.h>
71 #include <sys/sysevent/eventdefs.h>
72 #include <sys/sysevent/dr.h>
73 #include <sys/sata/sata_defs.h>
74 #include <sys/scsi/generic/sas.h>
75 #include <sys/scsi/impl/scsi_sas.h>
76 
77 #pragma pack(1)
78 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
86 #pragma pack()
87 
88 /*
89  * private header files.
90  *
91  */
92 #include <sys/scsi/impl/scsi_reset_notify.h>
93 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
94 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
95 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
96 #include <sys/raidioctl.h>
97 
98 #include <sys/fs/dv_node.h>	/* devfs_clean */
99 
100 /*
101  * FMA header files
102  */
103 #include <sys/ddifm.h>
104 #include <sys/fm/protocol.h>
105 #include <sys/fm/util.h>
106 #include <sys/fm/io/ddi.h>
107 
108 /*
109  * autoconfiguration data and routines.
110  */
111 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
112 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
113 static int mptsas_power(dev_info_t *dip, int component, int level);
114 
115 /*
116  * cb_ops function
117  */
118 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
119 	cred_t *credp, int *rval);
120 #ifdef __sparc
121 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
122 #else  /* __sparc */
123 static int mptsas_quiesce(dev_info_t *devi);
124 #endif	/* __sparc */
125 
126 /*
127  * Resource initialization for hardware
128  */
129 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
130 static void mptsas_disable_bus_master(mptsas_t *mpt);
131 static void mptsas_hba_fini(mptsas_t *mpt);
132 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
133 static int mptsas_alloc_request_frames(mptsas_t *mpt);
134 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
135 static int mptsas_alloc_free_queue(mptsas_t *mpt);
136 static int mptsas_alloc_post_queue(mptsas_t *mpt);
137 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
138 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
139 
140 /*
141  * SCSA function prototypes
142  */
143 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
144 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
145 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
146 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
147 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
148     int tgtonly);
149 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
150 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
151     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
152 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
153 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
154 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
155     struct scsi_pkt *pkt);
156 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
157     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
158 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
159     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
160 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
161     void (*callback)(caddr_t), caddr_t arg);
162 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
163 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
164 static int mptsas_scsi_quiesce(dev_info_t *dip);
165 static int mptsas_scsi_unquiesce(dev_info_t *dip);
166 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
167     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
168 
169 /*
170  * SMP functions
171  */
172 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
173 
174 /*
175  * internal function prototypes.
176  */
177 static int mptsas_quiesce_bus(mptsas_t *mpt);
178 static int mptsas_unquiesce_bus(mptsas_t *mpt);
179 
180 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
181 static void mptsas_free_handshake_msg(mptsas_t *mpt);
182 
183 static void mptsas_ncmds_checkdrain(void *arg);
184 
185 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
186 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
187 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
188 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
189 
190 static int mptsas_do_detach(dev_info_t *dev);
191 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
192 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
193     struct scsi_pkt *pkt);
194 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
195 
196 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
197 static void mptsas_handle_event(void *args);
198 static int mptsas_handle_event_sync(void *args);
199 static void mptsas_handle_dr(void *args);
200 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
201     dev_info_t *pdip);
202 
203 static void mptsas_restart_cmd(void *);
204 
205 static void mptsas_flush_hba(mptsas_t *mpt);
206 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
207 	uint8_t tasktype);
208 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
209     uchar_t reason, uint_t stat);
210 
211 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
212 static void mptsas_process_intr(mptsas_t *mpt,
213     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
214 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
215     pMpi2ReplyDescriptorsUnion_t reply_desc);
216 static void mptsas_handle_address_reply(mptsas_t *mpt,
217     pMpi2ReplyDescriptorsUnion_t reply_desc);
218 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
219 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
220     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
221 
222 static void mptsas_watch(void *arg);
223 static void mptsas_watchsubr(mptsas_t *mpt);
224 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
225 
226 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
227 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
228     uint8_t *data, uint32_t request_size, uint32_t reply_size,
229     uint32_t data_size, uint32_t direction, uint8_t *dataout,
230     uint32_t dataout_size, short timeout, int mode);
231 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
232 
233 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
234     uint32_t unique_id);
235 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
236 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
237     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
238 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
239     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
240     uint32_t diag_type);
241 static int mptsas_diag_register(mptsas_t *mpt,
242     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
243 static int mptsas_diag_unregister(mptsas_t *mpt,
244     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
245 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
246     uint32_t *return_code);
247 static int mptsas_diag_read_buffer(mptsas_t *mpt,
248     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
249     uint32_t *return_code, int ioctl_mode);
250 static int mptsas_diag_release(mptsas_t *mpt,
251     mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
252 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
253     uint8_t *diag_action, uint32_t length, uint32_t *return_code,
254     int ioctl_mode);
255 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
256     int mode);
257 
258 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
259     int cmdlen, int tgtlen, int statuslen, int kf);
260 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
261 
262 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
263 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
264 
265 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
266     int kmflags);
267 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
268 
269 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
270     mptsas_cmd_t *cmd);
271 static void mptsas_check_task_mgt(mptsas_t *mpt,
272     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
273 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
274     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
275     int *resid);
276 
277 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
278 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
279 
280 static void mptsas_restart_hba(mptsas_t *mpt);
281 static void mptsas_restart_waitq(mptsas_t *mpt);
282 
283 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
284 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
285 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
286 
287 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
288 static void mptsas_doneq_empty(mptsas_t *mpt);
289 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
290 
291 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
292 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
293 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
294 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
295 
296 
297 static void mptsas_start_watch_reset_delay();
298 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
299 static void mptsas_watch_reset_delay(void *arg);
300 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
301 
302 /*
303  * helper functions
304  */
305 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
306 
307 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
308 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
309 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
310     int lun);
311 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
312     int lun);
313 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
314 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
315 
316 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
317     int *lun);
318 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
319 
320 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
321     uint8_t phy);
322 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
323     uint64_t wwid);
324 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
325     uint64_t wwid);
326 
327 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
328     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
329 
330 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
331     uint16_t *handle, mptsas_target_t **pptgt);
332 static void mptsas_update_phymask(mptsas_t *mpt);
333 
334 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
335     uint32_t *status, uint8_t cmd);
336 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
337     mptsas_phymask_t *phymask);
338 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
339     mptsas_phymask_t phymask);
340 static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
341     uint32_t slotstatus);
342 
343 
344 /*
345  * Enumeration / DR functions
346  */
347 static void mptsas_config_all(dev_info_t *pdip);
348 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
349     dev_info_t **lundip);
350 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
351     dev_info_t **lundip);
352 
353 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
354 static int mptsas_offline_target(dev_info_t *pdip, char *name);
355 
356 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
357     dev_info_t **dip);
358 
359 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
360 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
361     dev_info_t **dip, mptsas_target_t *ptgt);
362 
363 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
364     dev_info_t **dip, mptsas_target_t *ptgt, int lun);
365 
366 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
367     char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
368 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
369     char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
370     int lun);
371 
372 static void mptsas_offline_missed_luns(dev_info_t *pdip,
373     uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
374 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
375     mdi_pathinfo_t *rpip, uint_t flags);
376 
377 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
378     dev_info_t **smp_dip);
379 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
380     uint_t flags);
381 
382 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
383     int mode, int *rval);
384 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
385     int mode, int *rval);
386 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
387     int mode, int *rval);
388 static void mptsas_record_event(void *args);
389 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
390     int mode);
391 
392 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
393 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
394 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
395 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
396     mptsas_phymask_t key2);
397 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
398     mptsas_phymask_t key2);
399 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
400 
401 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
402     uint32_t, mptsas_phymask_t, uint8_t);
403 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
404     mptsas_smp_t *data);
405 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
406     mptsas_phymask_t phymask);
407 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
408 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
409 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
410     dev_info_t **smp_dip);
411 
412 /*
413  * Power management functions
414  */
415 static void mptsas_idle_pm(void *arg);
416 static int mptsas_init_pm(mptsas_t *mpt);
417 
418 /*
419  * MPT MSI tunable:
420  *
421  * By default MSI is enabled on all supported platforms.
422  */
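/*
 * For example (illustrative, using the standard /etc/system syntax for
 * module globals), MSI can be turned off with:
 *
 *	set mptsas:mptsas_enable_msi = 0
 *
 * in which case mptsas_attach() falls back to FIXED interrupts.
 */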
423 boolean_t mptsas_enable_msi = B_TRUE;
424 
425 static int mptsas_add_intrs(mptsas_t *, int);
426 static void mptsas_rem_intrs(mptsas_t *);
427 
428 /*
429  * FMA Prototypes
430  */
431 static void mptsas_fm_init(mptsas_t *mpt);
432 static void mptsas_fm_fini(mptsas_t *mpt);
433 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
434 
435 extern pri_t minclsyspri, maxclsyspri;
436 
437 /*
438  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
439  * under this device that the paths to a physical device are created when
440  * MPxIO is used.
441  */
442 extern dev_info_t	*scsi_vhci_dip;
443 
444 /*
445  * Tunable timeout value for Inquiry VPD page 0x83
446  * By default the value is 30 seconds.
447  */
448 int mptsas_inq83_retry_timeout = 30;
449 
450 /*
451  * This is used to allocate memory for message frame storage, not for
452  * data I/O DMA. All message frames must be stored in the first 4G of
453  * physical memory.
454  */
455 ddi_dma_attr_t mptsas_dma_attrs = {
456 	DMA_ATTR_V0,	/* attribute layout version		*/
457 	0x0ull,		/* address low - should be 0 (longlong)	*/
458 	0xffffffffull,	/* address high - 32-bit max range	*/
459 	0x00ffffffull,	/* count max - max DMA object size	*/
460 	4,		/* allocation alignment requirements	*/
461 	0x78,		/* burstsizes - binary encoded values	*/
462 	1,		/* minxfer - gran. of DMA engine	*/
463 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
464 	0xffffffffull,	/* max segment size (DMA boundary)	*/
465 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
466 	512,		/* granularity - device transfer size	*/
467 	0		/* flags, set to 0			*/
468 };
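/*
 * Illustrative sketch only (nothing below is compiled): a message-frame
 * allocation typically flows through the DDI DMA framework with these
 * attributes, which is what keeps the frames in the first 4GB.  The local
 * names (dma_hdl, acc_hdl, memp, alloc_len, mem_size) are hypothetical.
 *
 *	(void) ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_msg_dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &dma_hdl);
 *	(void) ddi_dma_mem_alloc(dma_hdl, mem_size, &mpt->m_dev_acc_attr,
 *	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &memp, &alloc_len,
 *	    &acc_hdl);
 */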
469 
470 /*
471  * This is used for data I/O DMA memory allocation; full 64-bit DMA
472  * physical addresses are supported.
473  */
474 ddi_dma_attr_t mptsas_dma_attrs64 = {
475 	DMA_ATTR_V0,	/* attribute layout version		*/
476 	0x0ull,		/* address low - should be 0 (longlong)	*/
477 	0xffffffffffffffffull,	/* address high - 64-bit max	*/
478 	0x00ffffffull,	/* count max - max DMA object size	*/
479 	4,		/* allocation alignment requirements	*/
480 	0x78,		/* burstsizes - binary encoded values	*/
481 	1,		/* minxfer - gran. of DMA engine	*/
482 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
483 	0xffffffffull,	/* max segment size (DMA boundary)	*/
484 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
485 	512,		/* granularity - device transfer size	*/
486 	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
487 };
488 
489 ddi_device_acc_attr_t mptsas_dev_attr = {
490 	DDI_DEVICE_ATTR_V1,
491 	DDI_STRUCTURE_LE_ACC,
492 	DDI_STRICTORDER_ACC,
493 	DDI_DEFAULT_ACC
494 };
495 
496 static struct cb_ops mptsas_cb_ops = {
497 	scsi_hba_open,		/* open */
498 	scsi_hba_close,		/* close */
499 	nodev,			/* strategy */
500 	nodev,			/* print */
501 	nodev,			/* dump */
502 	nodev,			/* read */
503 	nodev,			/* write */
504 	mptsas_ioctl,		/* ioctl */
505 	nodev,			/* devmap */
506 	nodev,			/* mmap */
507 	nodev,			/* segmap */
508 	nochpoll,		/* chpoll */
509 	ddi_prop_op,		/* cb_prop_op */
510 	NULL,			/* streamtab */
511 	D_MP,			/* cb_flag */
512 	CB_REV,			/* rev */
513 	nodev,			/* aread */
514 	nodev			/* awrite */
515 };
516 
517 static struct dev_ops mptsas_ops = {
518 	DEVO_REV,		/* devo_rev, */
519 	0,			/* refcnt  */
520 	ddi_no_info,		/* info */
521 	nulldev,		/* identify */
522 	nulldev,		/* probe */
523 	mptsas_attach,		/* attach */
524 	mptsas_detach,		/* detach */
525 #ifdef  __sparc
526 	mptsas_reset,
527 #else
528 	nodev,			/* reset */
529 #endif  /* __sparc */
530 	&mptsas_cb_ops,		/* driver operations */
531 	NULL,			/* bus operations */
532 	mptsas_power,		/* power management */
533 #ifdef	__sparc
534 	ddi_quiesce_not_needed
535 #else
536 	mptsas_quiesce		/* quiesce */
537 #endif	/* __sparc */
538 };
539 
540 
541 #define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
542 
543 static struct modldrv modldrv = {
544 	&mod_driverops,	/* Type of module. This one is a driver */
545 	MPTSAS_MOD_STRING, /* Name of the module. */
546 	&mptsas_ops,	/* driver ops */
547 };
548 
549 static struct modlinkage modlinkage = {
550 	MODREV_1, &modldrv, NULL
551 };
552 #define	TARGET_PROP	"target"
553 #define	LUN_PROP	"lun"
554 #define	LUN64_PROP	"lun64"
555 #define	SAS_PROP	"sas-mpt"
556 #define	MDI_GUID	"wwn"
557 #define	NDI_GUID	"guid"
558 #define	MPTSAS_DEV_GONE	"mptsas_dev_gone"
559 
560 /*
561  * Local static data
562  */
563 #if defined(MPTSAS_DEBUG)
564 uint32_t mptsas_debug_flags = 0;
565 #endif	/* defined(MPTSAS_DEBUG) */
566 uint32_t mptsas_debug_resets = 0;
567 
568 static kmutex_t		mptsas_global_mutex;
569 static void		*mptsas_state;		/* soft	state ptr */
570 static krwlock_t	mptsas_global_rwlock;
571 
572 static kmutex_t		mptsas_log_mutex;
573 static char		mptsas_log_buf[256];
574 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
575 
576 static mptsas_t *mptsas_head, *mptsas_tail;
577 static clock_t mptsas_scsi_watchdog_tick;
578 static clock_t mptsas_tick;
579 static timeout_id_t mptsas_reset_watch;
580 static timeout_id_t mptsas_timeout_id;
581 static int mptsas_timeouts_enabled = 0;
582 /*
583  * warlock directives
584  */
585 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
586 	mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
587 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
588 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
589 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
590 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
591 
592 /*
593  * SM - HBA statics
594  */
595 
596 static char	*mptsas_driver_rev = MPTSAS_MOD_STRING;
597 
598 #ifdef MPTSAS_DEBUG
599 void debug_enter(char *);
600 #endif
601 
602 /*
603  * Notes:
604  *	- scsi_hba_init(9F) initializes SCSI HBA modules
605  *	- must call scsi_hba_fini(9F) if modload() fails
606  */
607 int
608 _init(void)
609 {
610 	int status;
611 	/* CONSTCOND */
612 	ASSERT(NO_COMPETING_THREADS);
613 
614 	NDBG0(("_init"));
615 
616 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
617 	    MPTSAS_INITIAL_SOFT_SPACE);
618 	if (status != 0) {
619 		return (status);
620 	}
621 
622 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
623 		ddi_soft_state_fini(&mptsas_state);
624 		return (status);
625 	}
626 
627 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
628 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
629 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
630 
631 	if ((status = mod_install(&modlinkage)) != 0) {
632 		mutex_destroy(&mptsas_log_mutex);
633 		rw_destroy(&mptsas_global_rwlock);
634 		mutex_destroy(&mptsas_global_mutex);
635 		ddi_soft_state_fini(&mptsas_state);
636 		scsi_hba_fini(&modlinkage);
637 	}
638 
639 	return (status);
640 }
641 
642 /*
643  * Notes:
644  *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
645  */
646 int
647 _fini(void)
648 {
649 	int	status;
650 	/* CONSTCOND */
651 	ASSERT(NO_COMPETING_THREADS);
652 
653 	NDBG0(("_fini"));
654 
655 	if ((status = mod_remove(&modlinkage)) == 0) {
656 		ddi_soft_state_fini(&mptsas_state);
657 		scsi_hba_fini(&modlinkage);
658 		mutex_destroy(&mptsas_global_mutex);
659 		rw_destroy(&mptsas_global_rwlock);
660 		mutex_destroy(&mptsas_log_mutex);
661 	}
662 	return (status);
663 }
664 
665 /*
666  * The loadable-module _info(9E) entry point
667  */
668 int
669 _info(struct modinfo *modinfop)
670 {
671 	/* CONSTCOND */
672 	ASSERT(NO_COMPETING_THREADS);
673 	NDBG0(("mptsas _info"));
674 
675 	return (mod_info(&modlinkage, modinfop));
676 }
677 
678 
679 static int
680 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
681 {
682 	dev_info_t		*pdip;
683 	mptsas_t		*mpt;
684 	scsi_hba_tran_t		*hba_tran;
685 	char			*iport = NULL;
686 	char			phymask[MPTSAS_MAX_PHYS];
687 	mptsas_phymask_t	phy_mask = 0;
688 	int			dynamic_port = 0;
689 	uint32_t		page_address;
690 	char			initiator_wwnstr[MPTSAS_WWN_STRLEN];
691 	int			rval = DDI_FAILURE;
692 	int			i = 0;
693 	uint8_t			numphys = 0;
694 	uint8_t			phy_id;
695 	uint8_t			phy_port = 0;
696 	uint16_t		attached_devhdl = 0;
697 	uint32_t		dev_info;
698 	uint64_t		attached_sas_wwn;
699 	uint16_t		dev_hdl;
700 	uint16_t		pdev_hdl;
701 	uint16_t		bay_num, enclosure;
702 	char			attached_wwnstr[MPTSAS_WWN_STRLEN];
703 
704 	/* CONSTCOND */
705 	ASSERT(NO_COMPETING_THREADS);
706 
707 	switch (cmd) {
708 	case DDI_ATTACH:
709 		break;
710 
711 	case DDI_RESUME:
712 		/*
713 		 * If this is a scsi-iport node, nothing to do here.
714 		 */
715 		return (DDI_SUCCESS);
716 
717 	default:
718 		return (DDI_FAILURE);
719 	}
720 
721 	pdip = ddi_get_parent(dip);
722 
723 	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
724 	    NULL) {
725 		cmn_err(CE_WARN, "Failed to attach iport: could not "
726 		    "get tran vector for the HBA node");
727 		return (DDI_FAILURE);
728 	}
729 
730 	mpt = TRAN2MPT(hba_tran);
731 	ASSERT(mpt != NULL);
732 	if (mpt == NULL)
733 		return (DDI_FAILURE);
734 
735 	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
736 	    NULL) {
737 		mptsas_log(mpt, CE_WARN, "Failed to attach iport: could not "
738 		    "get tran vector for the iport node");
739 		return (DDI_FAILURE);
740 	}
741 
742 	/*
743 	 * Set the iport's cloned tran_hba_private to the HBA's softc (mpt)
744 	 */
745 	hba_tran->tran_hba_private = mpt;
746 
747 	ddi_report_dev(dip);
748 
749 	/*
750 	 * Get the SAS address for the initiator port according to its dev_handle
751 	 */
752 	iport = ddi_get_name_addr(dip);
753 	if (iport && strncmp(iport, "v0", 2) == 0) {
754 		if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
755 		    MPTSAS_VIRTUAL_PORT, 1) !=
756 		    DDI_PROP_SUCCESS) {
757 			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
758 			    MPTSAS_VIRTUAL_PORT);
759 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
760 			    "prop update failed");
761 			return (DDI_FAILURE);
762 		}
763 		return (DDI_SUCCESS);
764 	}
765 
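	/*
	 * The iport unit-address handed out by scsi_hba_iport_register() in
	 * mptsas_attach() is simply the port's phymask printed in hex, so
	 * match the name back against the PHY table to recover the mask.
	 */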
766 	mutex_enter(&mpt->m_mutex);
767 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
768 		bzero(phymask, sizeof (phymask));
769 		(void) sprintf(phymask,
770 		    "%x", mpt->m_phy_info[i].phy_mask);
771 		if (strcmp(phymask, iport) == 0) {
772 			break;
773 		}
774 	}
775 
776 	if (i == MPTSAS_MAX_PHYS) {
777 		mptsas_log(mpt, CE_WARN, "Failed to attach port %s because the "
778 		    "port does not seem to exist", iport);
779 		mutex_exit(&mpt->m_mutex);
780 		return (DDI_FAILURE);
781 	}
782 
783 	phy_mask = mpt->m_phy_info[i].phy_mask;
784 
785 	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
786 		dynamic_port = 1;
787 	else
788 		dynamic_port = 0;
789 
790 	/*
791 	 * Update PHY info for smhba
792 	 */
793 	if (mptsas_smhba_phy_init(mpt)) {
794 		mutex_exit(&mpt->m_mutex);
795 		mptsas_log(mpt, CE_WARN, "mptsas phy update "
796 		    "failed");
797 		return (DDI_FAILURE);
798 	}
799 
800 	mutex_exit(&mpt->m_mutex);
801 
802 	numphys = 0;
803 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
804 		if ((phy_mask >> i) & 0x01) {
805 			numphys++;
806 		}
807 	}
808 
809 	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
810 	(void) sprintf(initiator_wwnstr, "w%016"PRIx64,
811 	    mpt->un.m_base_wwid);
812 
813 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
814 	    SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
815 	    DDI_PROP_SUCCESS) {
816 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
817 		    dip, SCSI_ADDR_PROP_INITIATOR_PORT);
818 		mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
819 		    "prop update failed");
820 		return (DDI_FAILURE);
821 	}
822 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
823 	    MPTSAS_NUM_PHYS, numphys) !=
824 	    DDI_PROP_SUCCESS) {
825 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
826 		return (DDI_FAILURE);
827 	}
828 
829 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
830 	    "phymask", phy_mask) !=
831 	    DDI_PROP_SUCCESS) {
832 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
833 		mptsas_log(mpt, CE_WARN, "mptsas phy mask "
834 		    "prop update failed");
835 		return (DDI_FAILURE);
836 	}
837 
838 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
839 	    "dynamic-port", dynamic_port) !=
840 	    DDI_PROP_SUCCESS) {
841 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
842 		mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
843 		    "prop update failed");
844 		return (DDI_FAILURE);
845 	}
846 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
847 	    MPTSAS_VIRTUAL_PORT, 0) !=
848 	    DDI_PROP_SUCCESS) {
849 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
850 		    MPTSAS_VIRTUAL_PORT);
851 		mptsas_log(mpt, CE_WARN, "mptsas virtual port "
852 		    "prop update failed");
853 		return (DDI_FAILURE);
854 	}
855 	mptsas_smhba_set_phy_props(mpt,
856 	    iport, dip, numphys, &attached_devhdl);
857 
858 	mutex_enter(&mpt->m_mutex);
859 	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
860 	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
861 	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
862 	    &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
863 	    &pdev_hdl, &bay_num, &enclosure);
864 	if (rval != DDI_SUCCESS) {
865 		mptsas_log(mpt, CE_WARN,
866 		    "Failed to get device page0 for handle:%d",
867 		    attached_devhdl);
868 		mutex_exit(&mpt->m_mutex);
869 		return (DDI_FAILURE);
870 	}
871 
872 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
873 		bzero(phymask, sizeof (phymask));
874 		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
875 		if (strcmp(phymask, iport) == 0) {
876 			(void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
877 			    "%x",
878 			    mpt->m_phy_info[i].phy_mask);
879 		}
880 	}
881 	mutex_exit(&mpt->m_mutex);
882 
883 	bzero(attached_wwnstr, sizeof (attached_wwnstr));
884 	(void) sprintf(attached_wwnstr, "w%016"PRIx64,
885 	    attached_sas_wwn);
886 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
887 	    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
888 	    DDI_PROP_SUCCESS) {
889 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
890 		    dip, SCSI_ADDR_PROP_ATTACHED_PORT);
891 		return (DDI_FAILURE);
892 	}
893 
894 	/* Create kstats for each phy on this iport */
895 
896 	mptsas_create_phy_stats(mpt, iport, dip);
897 
898 	/*
899 	 * register sas hba iport with mdi (MPxIO/vhci)
900 	 */
901 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
902 	    dip, 0) == MDI_SUCCESS) {
903 		mpt->m_mpxio_enable = TRUE;
904 	}
905 	return (DDI_SUCCESS);
906 }
907 
908 /*
909  * Notes:
910  *	Set up all device state and allocate data structures,
911  *	mutexes, condition variables, etc. for device operation.
912  *	Add interrupts needed.
913  *	Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
914  */
915 static int
916 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
917 {
918 	mptsas_t		*mpt = NULL;
919 	int			instance, i, j;
920 	int			doneq_thread_num;
921 	char			buf[64];
922 	char			intr_added = 0;
923 	char			map_setup = 0;
924 	char			config_setup = 0;
925 	char			hba_attach_setup = 0;
926 	char			smp_attach_setup = 0;
927 	char			mutex_init_done = 0;
928 	char			event_taskq_create = 0;
929 	char			dr_taskq_create = 0;
930 	char			doneq_thread_create = 0;
931 	char			chiprev, hw_rev[24];
932 	char			serial_number[72];
933 	scsi_hba_tran_t		*hba_tran;
934 	int			intr_types;
935 	uint_t			mem_bar = MEM_SPACE;
936 	mptsas_phymask_t	mask = 0x0;
937 	int			tran_flags = 0;
938 	int			rval = DDI_FAILURE;
939 	int			sm_hba = 1;
940 	int			num_phys = 0;
941 	int			protocol = 0;
942 
943 	/* CONSTCOND */
944 	ASSERT(NO_COMPETING_THREADS);
945 
946 	if (scsi_hba_iport_unit_address(dip)) {
947 		return (mptsas_iport_attach(dip, cmd));
948 	}
949 
950 	switch (cmd) {
951 	case DDI_ATTACH:
952 		break;
953 
954 	case DDI_RESUME:
955 		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
956 			return (DDI_FAILURE);
957 
958 		mpt = TRAN2MPT(hba_tran);
959 
960 		if (!mpt) {
961 			return (DDI_FAILURE);
962 		}
963 
964 		/*
965 		 * Reset hardware and softc to "no outstanding commands"
966 		 * Note	that a check condition can result on first command
967 		 * to a	target.
968 		 */
969 		mutex_enter(&mpt->m_mutex);
970 
971 		/*
972 		 * raise power.
973 		 */
974 		if (mpt->m_options & MPTSAS_OPT_PM) {
975 			mutex_exit(&mpt->m_mutex);
976 			(void) pm_busy_component(dip, 0);
977 			if (mpt->m_power_level != PM_LEVEL_D0) {
978 				rval = pm_raise_power(dip, 0, PM_LEVEL_D0);
979 			} else {
980 				rval = pm_power_has_changed(dip, 0,
981 				    PM_LEVEL_D0);
982 			}
983 			if (rval == DDI_SUCCESS) {
984 				mutex_enter(&mpt->m_mutex);
985 			} else {
986 				/*
987 				 * The pm_raise_power() call above failed,
988 				 * and that can only occur if we were unable
989 				 * to reset the hardware.  This is probably
990 				 * due to unhealthy hardware, and because
991 				 * important filesystems (such as the root
992 				 * filesystem) could be on the attached disks,
993 				 * it would not be a good idea to continue,
994 				 * as we won't be entirely certain we are
995 				 * writing correct data.  So we panic() here
996 				 * to not only prevent possible data corruption,
997 				 * but to give developers or end users a hope
998 				 * of identifying and correcting any problems.
999 				 */
1000 				fm_panic("mptsas could not reset hardware "
1001 				    "during resume");
1002 			}
1003 		}
1004 
1005 		mpt->m_suspended = 0;
1006 
1007 		/*
1008 		 * Reinitialize ioc
1009 		 */
1010 		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1011 			mutex_exit(&mpt->m_mutex);
1012 			if (mpt->m_options & MPTSAS_OPT_PM) {
1013 				(void) pm_idle_component(dip, 0);
1014 			}
1015 			fm_panic("mptsas init chip fail during resume");
1016 		}
1017 		/*
1018 		 * mptsas_update_driver_data needs interrupts so enable them
1019 		 * first.
1020 		 */
1021 		MPTSAS_ENABLE_INTR(mpt);
1022 		mptsas_update_driver_data(mpt);
1023 
1024 		/* start requests, if possible */
1025 		mptsas_restart_hba(mpt);
1026 
1027 		mutex_exit(&mpt->m_mutex);
1028 
1029 		/*
1030 		 * Restart watch thread
1031 		 */
1032 		mutex_enter(&mptsas_global_mutex);
1033 		if (mptsas_timeout_id == 0) {
1034 			mptsas_timeout_id = timeout(mptsas_watch, NULL,
1035 			    mptsas_tick);
1036 			mptsas_timeouts_enabled = 1;
1037 		}
1038 		mutex_exit(&mptsas_global_mutex);
1039 
1040 		/* report idle status to pm framework */
1041 		if (mpt->m_options & MPTSAS_OPT_PM) {
1042 			(void) pm_idle_component(dip, 0);
1043 		}
1044 
1045 		return (DDI_SUCCESS);
1046 
1047 	default:
1048 		return (DDI_FAILURE);
1049 
1050 	}
1051 
1052 	instance = ddi_get_instance(dip);
1053 
1054 	/*
1055 	 * Allocate softc information.
1056 	 */
1057 	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1058 		mptsas_log(NULL, CE_WARN,
1059 		    "mptsas%d: cannot allocate soft state", instance);
1060 		goto fail;
1061 	}
1062 
1063 	mpt = ddi_get_soft_state(mptsas_state, instance);
1064 
1065 	if (mpt == NULL) {
1066 		mptsas_log(NULL, CE_WARN,
1067 		    "mptsas%d: cannot get soft state", instance);
1068 		goto fail;
1069 	}
1070 
1071 	/* Allocate a transport structure */
1072 	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
1073 	ASSERT(mpt->m_tran != NULL);
1074 
1075 	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1076 	scsi_size_clean(dip);
1077 
1078 	mpt->m_dip = dip;
1079 	mpt->m_instance = instance;
1080 
1081 	/* Make a per-instance copy of the structures */
1082 	mpt->m_io_dma_attr = mptsas_dma_attrs64;
1083 	mpt->m_msg_dma_attr = mptsas_dma_attrs;
1084 	mpt->m_reg_acc_attr = mptsas_dev_attr;
1085 	mpt->m_dev_acc_attr = mptsas_dev_attr;
1086 
1087 	/*
1088 	 * Initialize FMA
1089 	 */
1090 	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1091 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1092 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1093 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1094 
1095 	mptsas_fm_init(mpt);
1096 
1097 	if (pci_config_setup(mpt->m_dip,
1098 	    &mpt->m_config_handle) != DDI_SUCCESS) {
1099 		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
1100 		goto fail;
1101 	}
1102 	config_setup++;
1103 
1104 	if (mptsas_alloc_handshake_msg(mpt,
1105 	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1106 		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1107 		goto fail;
1108 	}
1109 
1110 	/*
1111 	 * This is a workaround for an XMITS ASIC bug which does not
1112 	 * drive the CBE upper bits.
1113 	 */
1114 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
1115 	    PCI_STAT_PERROR) {
1116 		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
1117 		    PCI_STAT_PERROR);
1118 	}
1119 
1120 	/*
1121 	 * Setup configuration space
1122 	 */
1123 	if (mptsas_config_space_init(mpt) == FALSE) {
1124 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1125 		goto fail;
1126 	}
1127 
1128 	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1129 	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1130 		mptsas_log(mpt, CE_WARN, "map setup failed");
1131 		goto fail;
1132 	}
1133 	map_setup++;
1134 
1135 	/*
1136 	 * A taskq is created for dealing with the event handler
1137 	 */
1138 	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1139 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1140 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1141 		goto fail;
1142 	}
1143 	event_taskq_create++;
1144 
1145 	/*
1146 	 * A taskq is created for dealing with dr events
1147 	 */
1148 	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1149 	    "mptsas_dr_taskq",
1150 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1151 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1152 		    "failed");
1153 		goto fail;
1154 	}
1155 	dr_taskq_create++;
1156 
1157 	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1158 	    0, "mptsas_doneq_thread_threshold_prop", 10);
1159 	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1160 	    0, "mptsas_doneq_length_threshold_prop", 8);
1161 	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1162 	    0, "mptsas_doneq_thread_n_prop", 8);
1163 
1164 	if (mpt->m_doneq_thread_n) {
1165 		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1166 		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1167 
1168 		mutex_enter(&mpt->m_doneq_mutex);
1169 		mpt->m_doneq_thread_id =
1170 		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1171 		    * mpt->m_doneq_thread_n, KM_SLEEP);
1172 
1173 		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1174 			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1175 			    CV_DRIVER, NULL);
1176 			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1177 			    MUTEX_DRIVER, NULL);
1178 			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1179 			mpt->m_doneq_thread_id[j].flag |=
1180 			    MPTSAS_DONEQ_THREAD_ACTIVE;
1181 			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1182 			mpt->m_doneq_thread_id[j].arg.t = j;
1183 			mpt->m_doneq_thread_id[j].threadp =
1184 			    thread_create(NULL, 0, mptsas_doneq_thread,
1185 			    &mpt->m_doneq_thread_id[j].arg,
1186 			    0, &p0, TS_RUN, minclsyspri);
1187 			mpt->m_doneq_thread_id[j].donetail =
1188 			    &mpt->m_doneq_thread_id[j].doneq;
1189 			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1190 		}
1191 		mutex_exit(&mpt->m_doneq_mutex);
1192 		doneq_thread_create++;
1193 	}
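	/*
	 * A minimal sketch of the consumer pattern each worker follows; the
	 * real loop, mptsas_doneq_thread(), appears later in this file and
	 * additionally honors the MPTSAS_DONEQ_THREAD_ACTIVE flag so the
	 * teardown code below can shut the pool down.
	 *
	 *	item = &mpt->m_doneq_thread_id[t];
	 *	mutex_enter(&item->mutex);
	 *	while (item->doneq == NULL)
	 *		cv_wait(&item->cv, &item->mutex);
	 *	cmd = mptsas_doneq_thread_rm(mpt, t);
	 *	mutex_exit(&item->mutex);
	 *	(complete cmd by calling its pkt completion routine)
	 */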
1194 
1195 	/* Get supported interrupt types */
1196 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
1197 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
1198 		    "failed\n");
1199 		goto fail;
1200 	}
1201 
1202 	NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
1203 
1204 	if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
1205 		/*
1206 		 * Try MSI, but fall back to FIXED
1207 		 */
1208 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
1209 			NDBG0(("Using MSI interrupt type"));
1210 			mpt->m_intr_type = DDI_INTR_TYPE_MSI;
1211 			goto intr_done;
1212 		}
1213 	}
1214 
1215 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1216 
1217 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
1218 			NDBG0(("Using FIXED interrupt type"));
1219 			mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
1220 
1221 			goto intr_done;
1222 		}
1223 
1224 		NDBG0(("FIXED interrupt registration failed"));
1225 	}
1226 
1227 	goto fail;
1228 
1229 intr_done:
1230 	intr_added++;
1231 
1232 	/* Initialize mutex used in interrupt handler */
1233 	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1234 	    DDI_INTR_PRI(mpt->m_intr_pri));
1235 	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1236 	    DDI_INTR_PRI(mpt->m_intr_pri));
1237 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1238 		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1239 		    NULL, MUTEX_DRIVER,
1240 		    DDI_INTR_PRI(mpt->m_intr_pri));
1241 	}
1242 
1243 	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1244 	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1245 	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1246 	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1247 	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1248 	mutex_init_done++;
1249 
1250 	/*
1251 	 * Disable hardware interrupt since we're not ready to
1252 	 * handle it yet.
1253 	 */
1254 	MPTSAS_DISABLE_INTR(mpt);
1255 
1256 	/*
1257 	 * Enable interrupts
1258 	 */
1259 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
1260 		/* Call ddi_intr_block_enable() for MSI interrupts */
1261 		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
1262 	} else {
1263 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
1264 		for (i = 0; i < mpt->m_intr_cnt; i++) {
1265 			(void) ddi_intr_enable(mpt->m_htable[i]);
1266 		}
1267 	}
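	/*
	 * Note: when ddi_intr_get_cap(9F) reports DDI_INTR_FLAG_BLOCK for the
	 * allocated vectors (typical for MSI), the DDI requires that they be
	 * enabled and disabled as a group with ddi_intr_block_enable(9F) and
	 * ddi_intr_block_disable(9F); otherwise each handle is enabled
	 * individually with ddi_intr_enable(9F), as above.
	 */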
1268 
1269 	mutex_enter(&mpt->m_mutex);
1270 	/*
1271 	 * Initialize power management component
1272 	 */
1273 	if (mpt->m_options & MPTSAS_OPT_PM) {
1274 		if (mptsas_init_pm(mpt)) {
1275 			mutex_exit(&mpt->m_mutex);
1276 			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1277 			    "failed");
1278 			goto fail;
1279 		}
1280 	}
1281 
1282 	/*
1283 	 * Initialize chip
1284 	 */
1285 	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1286 		mutex_exit(&mpt->m_mutex);
1287 		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1288 		goto fail;
1289 	}
1290 
1291 	mutex_exit(&mpt->m_mutex);
1292 
1293 	/*
1294 	 * initialize SCSI HBA transport structure
1295 	 */
1296 	hba_tran->tran_hba_private	= mpt;
1297 	hba_tran->tran_tgt_private	= NULL;
1298 
1299 	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
1300 	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;
1301 
1302 	hba_tran->tran_start		= mptsas_scsi_start;
1303 	hba_tran->tran_reset		= mptsas_scsi_reset;
1304 	hba_tran->tran_abort		= mptsas_scsi_abort;
1305 	hba_tran->tran_getcap		= mptsas_scsi_getcap;
1306 	hba_tran->tran_setcap		= mptsas_scsi_setcap;
1307 	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
1308 	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;
1309 
1310 	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
1311 	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
1312 	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;
1313 
1314 	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
1315 	hba_tran->tran_get_name		= mptsas_get_name;
1316 
1317 	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
1318 	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
1319 	hba_tran->tran_bus_reset	= NULL;
1320 
1321 	hba_tran->tran_add_eventcall	= NULL;
1322 	hba_tran->tran_get_eventcookie	= NULL;
1323 	hba_tran->tran_post_event	= NULL;
1324 	hba_tran->tran_remove_eventcall	= NULL;
1325 
1326 	hba_tran->tran_bus_config	= mptsas_bus_config;
1327 
1328 	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
1329 
1330 	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
1331 		goto fail;
1332 	}
1333 
1334 	/*
1335 	 * Register an iport for each port of a multiple-port HBA
1336 	 */
1337 	/*
1338 	 * initial value of mask is 0
1339 	 */
1340 	mutex_enter(&mpt->m_mutex);
1341 	for (i = 0; i < mpt->m_num_phys; i++) {
1342 		mptsas_phymask_t phy_mask = 0x0;
1343 		char phy_mask_name[MPTSAS_MAX_PHYS];
1344 		uint8_t current_port;
1345 
1346 		if (mpt->m_phy_info[i].attached_devhdl == 0)
1347 			continue;
1348 
1349 		bzero(phy_mask_name, sizeof (phy_mask_name));
1350 
1351 		current_port = mpt->m_phy_info[i].port_num;
1352 
1353 		if ((mask & (1 << i)) != 0)
1354 			continue;
1355 
1356 		for (j = 0; j < mpt->m_num_phys; j++) {
1357 			if (mpt->m_phy_info[j].attached_devhdl &&
1358 			    (mpt->m_phy_info[j].port_num == current_port)) {
1359 				phy_mask |= (1 << j);
1360 			}
1361 		}
1362 		mask = mask | phy_mask;
1363 
1364 		for (j = 0; j < mpt->m_num_phys; j++) {
1365 			if ((phy_mask >> j) & 0x01) {
1366 				mpt->m_phy_info[j].phy_mask = phy_mask;
1367 			}
1368 		}
1369 
1370 		(void) sprintf(phy_mask_name, "%x", phy_mask);
1371 
1372 		mutex_exit(&mpt->m_mutex);
1373 		/*
1374 		 * register an iport
1375 		 */
1376 		(void) scsi_hba_iport_register(dip, phy_mask_name);
1377 		mutex_enter(&mpt->m_mutex);
1378 	}
1379 	mutex_exit(&mpt->m_mutex);
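	/*
	 * Worked example of the loop above (illustrative numbers): if phys
	 * 0-3 report port_num 0 and phys 4-7 report port_num 1, the loop
	 * builds phy_mask 0x0f for the first port and 0xf0 for the second,
	 * and registers iports named "f" and "f0" accordingly.
	 */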
1380 
1381 	/*
1382 	 * always register a virtual port for RAID volumes
1383 	 */
1384 	(void) scsi_hba_iport_register(dip, "v0");
1385 
1386 	/*
1387 	 * All children of the HBA are iports and each needs a cloned tran,
1388 	 * so we pass SCSI_HBA_TRAN_CLONE to SCSA; the flag is inherited
1389 	 * by each iport's tran vector.
1390 	 */
1391 	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
1392 
1393 	if (scsi_hba_attach_setup(dip, &mpt->m_msg_dma_attr,
1394 	    hba_tran, tran_flags) != DDI_SUCCESS) {
1395 		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
1396 		goto fail;
1397 	}
1398 	hba_attach_setup++;
1399 
1400 	mpt->m_smptran = smp_hba_tran_alloc(dip);
1401 	ASSERT(mpt->m_smptran != NULL);
1402 	mpt->m_smptran->smp_tran_hba_private = mpt;
1403 	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
1404 	if (smp_hba_attach_setup(dip, mpt->m_smptran) != DDI_SUCCESS) {
1405 		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
1406 		goto fail;
1407 	}
1408 	smp_attach_setup++;
1409 
1410 	/*
1411 	 * Initialize smp hash table
1412 	 */
1413 	mptsas_hash_init(&mpt->m_active->m_smptbl);
1414 	mpt->m_smp_devhdl = 0xFFFF;
1415 
1416 	/*
1417 	 * create kmem cache for packets
1418 	 */
1419 	(void) sprintf(buf, "mptsas%d_cache", instance);
1420 	mpt->m_kmem_cache = kmem_cache_create(buf,
1421 	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
1422 	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
1423 	    NULL, (void *)mpt, NULL, 0);
1424 
1425 	if (mpt->m_kmem_cache == NULL) {
1426 		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
1427 		goto fail;
1428 	}
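	/*
	 * Sketch of how this cache is consumed (hypothetical local names;
	 * the real allocations happen in the driver's packet setup and
	 * teardown paths):
	 *
	 *	mptsas_cmd_t *cmd = kmem_cache_alloc(mpt->m_kmem_cache,
	 *	    KM_SLEEP);
	 *	...
	 *	kmem_cache_free(mpt->m_kmem_cache, cmd);
	 *
	 * Each object is sized for the command plus its scsi_pkt, so a
	 * single allocation covers both.
	 */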
1429 
1430 	/*
1431 	 * create kmem cache for extra SGL frames if the SGL cannot
1432 	 * be accommodated in the main request frame.
1433 	 */
1434 	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
1435 	mpt->m_cache_frames = kmem_cache_create(buf,
1436 	    sizeof (mptsas_cache_frames_t), 8,
1437 	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
1438 	    NULL, (void *)mpt, NULL, 0);
1439 
1440 	if (mpt->m_cache_frames == NULL) {
1441 		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
1442 		goto fail;
1443 	}
1444 
1445 	mpt->m_scsi_reset_delay	= ddi_prop_get_int(DDI_DEV_T_ANY,
1446 	    dip, 0, "scsi-reset-delay",	SCSI_DEFAULT_RESET_DELAY);
1447 	if (mpt->m_scsi_reset_delay == 0) {
1448 		mptsas_log(mpt, CE_NOTE,
1449 		    "scsi_reset_delay of 0 is not recommended,"
1450 		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1451 		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1452 	}
1453 
1454 	/*
1455 	 * Initialize the wait and done FIFO queue
1456 	 */
1457 	mpt->m_donetail = &mpt->m_doneq;
1458 	mpt->m_waitqtail = &mpt->m_waitq;
1459 
1460 	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1461 	mpt->m_tx_draining = 0;
1462 
1463 	/*
1464 	 * Initialize the IOC command queue
1465 	 */
1466 	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1467 
1468 	mpt->m_dev_handle = 0xFFFF;
1469 
1470 	MPTSAS_ENABLE_INTR(mpt);
1471 
1472 	/*
1473 	 * enable event notification
1474 	 */
1475 	mutex_enter(&mpt->m_mutex);
1476 	if (mptsas_ioc_enable_event_notification(mpt)) {
1477 		mutex_exit(&mpt->m_mutex);
1478 		goto fail;
1479 	}
1480 
1481 	/*
1482 	 * Initialize PHY info for smhba
1483 	 */
1484 	if (mptsas_smhba_phy_init(mpt)) {
1485 		mutex_exit(&mpt->m_mutex);
1486 		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1487 		    "failed");
1488 		goto fail;
1489 	}
1490 
1491 	mutex_exit(&mpt->m_mutex);
1492 
1493 
1494 	/* Check all dma handles allocated in attach */
1495 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1496 	    != DDI_SUCCESS) ||
1497 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1498 	    != DDI_SUCCESS) ||
1499 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1500 	    != DDI_SUCCESS) ||
1501 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1502 	    != DDI_SUCCESS) ||
1503 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1504 	    != DDI_SUCCESS)) {
1505 		goto fail;
1506 	}
1507 
1508 	/* Check all acc handles allocated in attach */
1509 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1510 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1511 	    != DDI_SUCCESS) ||
1512 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1513 	    != DDI_SUCCESS) ||
1514 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1515 	    != DDI_SUCCESS) ||
1516 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1517 	    != DDI_SUCCESS) ||
1518 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1519 	    != DDI_SUCCESS) ||
1520 	    (mptsas_check_acc_handle(mpt->m_config_handle)
1521 	    != DDI_SUCCESS)) {
1522 		goto fail;
1523 	}
1524 
1525 	/*
1526 	 * After this point, we are not going to fail the attach.
1527 	 */
1528 	/*
1529 	 * used for mptsas_watch
1530 	 */
1531 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1532 	if (mptsas_head == NULL) {
1533 		mptsas_head = mpt;
1534 	} else {
1535 		mptsas_tail->m_next = mpt;
1536 	}
1537 	mptsas_tail = mpt;
1538 	rw_exit(&mptsas_global_rwlock);
1539 
1540 	mutex_enter(&mptsas_global_mutex);
1541 	if (mptsas_timeouts_enabled == 0) {
1542 		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1543 		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1544 
1545 		mptsas_tick = mptsas_scsi_watchdog_tick *
1546 		    drv_usectohz((clock_t)1000000);
1547 
1548 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1549 		mptsas_timeouts_enabled = 1;
1550 	}
1551 	mutex_exit(&mptsas_global_mutex);
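	/*
	 * For example, a "scsi-watchdog-tick" property of 10 (seconds)
	 * yields mptsas_tick = 10 * drv_usectohz(1000000) clock ticks, so
	 * the watchdog mptsas_watch() fires 10 seconds later; the timeout
	 * is cancelled with untimeout(9F) when the last instance goes away
	 * (see the fail path below).
	 */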
1552 
1553 	/* Print message of HBA present */
1554 	ddi_report_dev(dip);
1555 
1556 	/* SM-HBA support */
1557 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_INT32, MPTSAS_SMHBA_SUPPORTED,
1558 	    &sm_hba);
1559 
1560 	/* SM-HBA driver version */
1561 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_STRING, MPTSAS_DRV_VERSION,
1562 	    mptsas_driver_rev);
1563 
1564 	/* SM-HBA hardware version */
1565 	chiprev = 'A' + mpt->m_revid;
1566 	(void) snprintf(hw_rev, 2, "%s", &chiprev);
1567 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_STRING, MPTSAS_HWARE_VERSION,
1568 	    hw_rev);
1569 
1570 	/* SM-HBA phy number per HBA */
1571 	num_phys = mpt->m_num_phys;
1572 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_INT32, MPTSAS_NUM_PHYS_HBA,
1573 	    &num_phys);
1574 
1575 	/* SM-HBA protocol support */
1576 	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
1577 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_INT32,
1578 	    MPTSAS_SUPPORTED_PROTOCOL, &protocol);
1579 
1580 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_STRING, MPTSAS_MANUFACTURER,
1581 	    mpt->m_MANU_page0.ChipName);
1582 
1583 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_STRING, MPTSAS_MODEL_NAME,
1584 	    mpt->m_MANU_page0.BoardName);
1585 
1586 	/*
1587 	 * VPD data is not available, so we construct a serial number instead.
1588 	 */
1589 
1590 	(void) sprintf(serial_number, "%s%s%s%s%s",
1591 	    mpt->m_MANU_page0.ChipName,
1592 	    mpt->m_MANU_page0.ChipRevision,
1593 	    mpt->m_MANU_page0.BoardName,
1594 	    mpt->m_MANU_page0.BoardAssembly,
1595 	    mpt->m_MANU_page0.BoardTracerNumber);
1596 
1597 	mptsas_smhba_add_hba_prop(mpt, DATA_TYPE_STRING, MPTSAS_SERIAL_NUMBER,
1598 	    &serial_number[0]);
1599 
1600 	/* report idle status to pm framework */
1601 	if (mpt->m_options & MPTSAS_OPT_PM) {
1602 		(void) pm_idle_component(dip, 0);
1603 	}
1604 
1605 	return (DDI_SUCCESS);
1606 
1607 fail:
1608 	mptsas_log(mpt, CE_WARN, "attach failed");
1609 	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1610 	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1611 	if (mpt) {
1612 		mutex_enter(&mptsas_global_mutex);
1613 
1614 		if (mptsas_timeout_id && (mptsas_head == NULL)) {
1615 			timeout_id_t tid = mptsas_timeout_id;
1616 			mptsas_timeouts_enabled = 0;
1617 			mptsas_timeout_id = 0;
1618 			mutex_exit(&mptsas_global_mutex);
1619 			(void) untimeout(tid);
1620 			mutex_enter(&mptsas_global_mutex);
1621 		}
1622 		mutex_exit(&mptsas_global_mutex);
1623 		/* deallocate in reverse order */
1624 		if (mpt->m_cache_frames) {
1625 			kmem_cache_destroy(mpt->m_cache_frames);
1626 		}
1627 		if (mpt->m_kmem_cache) {
1628 			kmem_cache_destroy(mpt->m_kmem_cache);
1629 		}
1630 		if (hba_attach_setup) {
1631 			(void) scsi_hba_detach(dip);
1632 		}
1633 		if (smp_attach_setup) {
1634 			(void) smp_hba_detach(dip);
1635 		}
1636 		if (intr_added) {
1637 			mptsas_rem_intrs(mpt);
1638 		}
1639 		if (doneq_thread_create) {
1640 			mutex_enter(&mpt->m_doneq_mutex);
1641 			doneq_thread_num = mpt->m_doneq_thread_n;
1642 			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1643 				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1644 				mpt->m_doneq_thread_id[j].flag &=
1645 				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1646 				cv_signal(&mpt->m_doneq_thread_id[j].cv);
1647 				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1648 			}
1649 			while (mpt->m_doneq_thread_n) {
1650 				cv_wait(&mpt->m_doneq_thread_cv,
1651 				    &mpt->m_doneq_mutex);
1652 			}
1653 			for (j = 0; j < doneq_thread_num; j++) {
1654 				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1655 				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1656 			}
1657 			kmem_free(mpt->m_doneq_thread_id,
1658 			    sizeof (mptsas_doneq_thread_list_t)
1659 			    * doneq_thread_num);
1660 			mutex_exit(&mpt->m_doneq_mutex);
1661 			cv_destroy(&mpt->m_doneq_thread_cv);
1662 			mutex_destroy(&mpt->m_doneq_mutex);
1663 		}
1664 		if (event_taskq_create) {
1665 			ddi_taskq_destroy(mpt->m_event_taskq);
1666 		}
1667 		if (dr_taskq_create) {
1668 			ddi_taskq_destroy(mpt->m_dr_taskq);
1669 		}
1670 		if (mutex_init_done) {
1671 			mutex_destroy(&mpt->m_tx_waitq_mutex);
1672 			mutex_destroy(&mpt->m_mutex);
1673 			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1674 				mutex_destroy(
1675 				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
1676 			}
1677 			cv_destroy(&mpt->m_cv);
1678 			cv_destroy(&mpt->m_passthru_cv);
1679 			cv_destroy(&mpt->m_fw_cv);
1680 			cv_destroy(&mpt->m_config_cv);
1681 			cv_destroy(&mpt->m_fw_diag_cv);
1682 		}
1683 		mptsas_free_handshake_msg(mpt);
1684 		mptsas_hba_fini(mpt);
1685 		if (map_setup) {
1686 			mptsas_cfg_fini(mpt);
1687 		}
1688 		if (config_setup) {
1689 			pci_config_teardown(&mpt->m_config_handle);
1690 		}
1691 		if (mpt->m_tran) {
1692 			scsi_hba_tran_free(mpt->m_tran);
1693 			mpt->m_tran = NULL;
1694 		}
1695 		if (mpt->m_smptran) {
1696 			smp_hba_tran_free(mpt->m_smptran);
1697 			mpt->m_smptran = NULL;
1698 		}
1699 		mptsas_fm_fini(mpt);
1700 		ddi_soft_state_free(mptsas_state, instance);
1701 		ddi_prop_remove_all(dip);
1702 	}
1703 	return (DDI_FAILURE);
1704 }
1705 
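/*
 * DDI_SUSPEND handling: cancel this instance's timeouts, stop the global
 * watch timers once every instance is suspended, then (if the chip is at
 * full power) disable interrupts, sync IR with a RAID action shutdown and
 * drain the event and DR taskqs.
 */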
1706 static int
1707 mptsas_suspend(dev_info_t *devi)
1708 {
1709 	mptsas_t	*mpt, *g;
1710 	scsi_hba_tran_t	*tran;
1711 
1712 	if (scsi_hba_iport_unit_address(devi)) {
1713 		return (DDI_SUCCESS);
1714 	}
1715 
1716 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1717 		return (DDI_SUCCESS);
1718 
1719 	mpt = TRAN2MPT(tran);
1720 	if (!mpt) {
1721 		return (DDI_SUCCESS);
1722 	}
1723 
1724 	mutex_enter(&mpt->m_mutex);
1725 
1726 	if (mpt->m_suspended++) {
1727 		mutex_exit(&mpt->m_mutex);
1728 		return (DDI_SUCCESS);
1729 	}
1730 
1731 	/*
1732 	 * Cancel timeout threads for this mpt
1733 	 */
1734 	if (mpt->m_quiesce_timeid) {
1735 		timeout_id_t tid = mpt->m_quiesce_timeid;
1736 		mpt->m_quiesce_timeid = 0;
1737 		mutex_exit(&mpt->m_mutex);
1738 		(void) untimeout(tid);
1739 		mutex_enter(&mpt->m_mutex);
1740 	}
1741 
1742 	if (mpt->m_restart_cmd_timeid) {
1743 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1744 		mpt->m_restart_cmd_timeid = 0;
1745 		mutex_exit(&mpt->m_mutex);
1746 		(void) untimeout(tid);
1747 		mutex_enter(&mpt->m_mutex);
1748 	}
1749 
1750 	if (mpt->m_pm_timeid != 0) {
1751 		timeout_id_t tid = mpt->m_pm_timeid;
1752 		mpt->m_pm_timeid = 0;
1753 		mutex_exit(&mpt->m_mutex);
1754 		(void) untimeout(tid);
1755 		/*
1756 		 * Report idle status for last ioctl since
1757 		 * calls to pm_busy_component(9F) are stacked.
1758 		 */
1759 		(void) pm_idle_component(mpt->m_dip, 0);
1760 		mutex_enter(&mpt->m_mutex);
1761 	}
1762 	mutex_exit(&mpt->m_mutex);
1763 
1764 	/*
1765 	 * Cancel watch threads if all mpts suspended
1766 	 */
1767 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1768 	for (g = mptsas_head; g != NULL; g = g->m_next) {
1769 		if (!g->m_suspended)
1770 			break;
1771 	}
1772 	rw_exit(&mptsas_global_rwlock);
1773 
1774 	mutex_enter(&mptsas_global_mutex);
1775 	if (g == NULL) {
1776 		timeout_id_t tid;
1777 
1778 		mptsas_timeouts_enabled = 0;
1779 		if (mptsas_timeout_id) {
1780 			tid = mptsas_timeout_id;
1781 			mptsas_timeout_id = 0;
1782 			mutex_exit(&mptsas_global_mutex);
1783 			(void) untimeout(tid);
1784 			mutex_enter(&mptsas_global_mutex);
1785 		}
1786 		if (mptsas_reset_watch) {
1787 			tid = mptsas_reset_watch;
1788 			mptsas_reset_watch = 0;
1789 			mutex_exit(&mptsas_global_mutex);
1790 			(void) untimeout(tid);
1791 			mutex_enter(&mptsas_global_mutex);
1792 		}
1793 	}
1794 	mutex_exit(&mptsas_global_mutex);
1795 
1796 	mutex_enter(&mpt->m_mutex);
1797 
1798 	/*
1799 	 * If this mpt is not at full power (PM_LEVEL_D0), just return.
1800 	 */
1801 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
1802 	    (mpt->m_power_level != PM_LEVEL_D0)) {
1803 		mutex_exit(&mpt->m_mutex);
1804 		return (DDI_SUCCESS);
1805 	}
1806 
1807 	/* Disable HBA interrupts in hardware */
1808 	MPTSAS_DISABLE_INTR(mpt);
1809 	/*
1810 	 * Send RAID action system shutdown to sync IR
1811 	 */
1812 	mptsas_raid_action_system_shutdown(mpt);
1813 
1814 	mutex_exit(&mpt->m_mutex);
1815 
1816 	/* drain the taskq */
1817 	ddi_taskq_wait(mpt->m_event_taskq);
1818 	ddi_taskq_wait(mpt->m_dr_taskq);
1819 
1820 	return (DDI_SUCCESS);
1821 }
1822 
1823 #ifdef	__sparc
1824 /*ARGSUSED*/
1825 static int
1826 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1827 {
1828 	mptsas_t	*mpt;
1829 	scsi_hba_tran_t *tran;
1830 
1831 	/*
1832 	 * If this call is for iport, just return.
1833 	 */
1834 	if (scsi_hba_iport_unit_address(devi))
1835 		return (DDI_SUCCESS);
1836 
1837 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1838 		return (DDI_SUCCESS);
1839 
1840 	if ((mpt = TRAN2MPT(tran)) == NULL)
1841 		return (DDI_SUCCESS);
1842 
1843 	/*
1844 	 * Send RAID action system shutdown to sync IR.  Disable HBA
1845 	 * interrupts in hardware first.
1846 	 */
1847 	MPTSAS_DISABLE_INTR(mpt);
1848 	mptsas_raid_action_system_shutdown(mpt);
1849 
1850 	return (DDI_SUCCESS);
1851 }
1852 #else /* __sparc */
1853 /*
1854  * quiesce(9E) entry point.
1855  *
1856  * This function is called when the system is single-threaded at high
1857  * PIL with preemption disabled. Therefore, this function must not be
1858  * PIL with preemption disabled.  Therefore, this function must not
1859  * block.
1860  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1861  * DDI_FAILURE indicates an error condition and should almost never happen.
1862  */
1863 static int
1864 mptsas_quiesce(dev_info_t *devi)
1865 {
1866 	mptsas_t	*mpt;
1867 	scsi_hba_tran_t *tran;
1868 
1869 	/*
1870 	 * If this call is for iport, just return.
1871 	 */
1872 	if (scsi_hba_iport_unit_address(devi))
1873 		return (DDI_SUCCESS);
1874 
1875 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1876 		return (DDI_SUCCESS);
1877 
1878 	if ((mpt = TRAN2MPT(tran)) == NULL)
1879 		return (DDI_SUCCESS);
1880 
1881 	/* Disable HBA interrupts in hardware */
1882 	MPTSAS_DISABLE_INTR(mpt);
1883 	/* Send RAID action system shutdown to sync IR */
1884 	mptsas_raid_action_system_shutdown(mpt);
1885 
1886 	return (DDI_SUCCESS);
1887 }
1888 #endif	/* __sparc */
1889 
1890 /*
1891  * detach(9E).	Remove all device allocations and system resources;
1892  * disable device interrupts.
1893  * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1894  */
1895 static int
1896 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1897 {
1898 	/* CONSTCOND */
1899 	ASSERT(NO_COMPETING_THREADS);
1900 	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1901 
1902 	switch (cmd) {
1903 	case DDI_DETACH:
1904 		return (mptsas_do_detach(devi));
1905 
1906 	case DDI_SUSPEND:
1907 		return (mptsas_suspend(devi));
1908 
1909 	default:
1910 		return (DDI_FAILURE);
1911 	}
1912 	/* NOTREACHED */
1913 }
1914 
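/*
 * Tear down one HBA instance: for an iport node, free any MPxIO path
 * information and unregister the pHCI; for the HBA node, raise power to
 * D0, stop interrupts, taskqs and doneq worker threads, unlink the
 * instance from the global list, cancel timeouts and release the DMA,
 * transport and soft-state resources allocated during attach.
 */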
1915 static int
1916 mptsas_do_detach(dev_info_t *dip)
1917 {
1918 	mptsas_t	*mpt, *m;
1919 	scsi_hba_tran_t	*tran;
1920 	mptsas_slots_t	*active;
1921 	int		circ = 0;
1922 	int		circ1 = 0;
1923 	mdi_pathinfo_t	*pip = NULL;
1924 	int		i;
1925 	int		doneq_thread_num = 0;
1926 
1927 	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1928 
1929 	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1930 		return (DDI_FAILURE);
1931 
1932 	mpt = TRAN2MPT(tran);
1933 	if (!mpt) {
1934 		return (DDI_FAILURE);
1935 	}
1936 	/*
1937 	 * Still have pathinfo child, should not detach mpt driver
1938 	 */
1939 	if (scsi_hba_iport_unit_address(dip)) {
1940 		if (mpt->m_mpxio_enable) {
1941 			/*
1942 			 * MPxIO enabled for the iport
1943 			 */
1944 			ndi_devi_enter(scsi_vhci_dip, &circ1);
1945 			ndi_devi_enter(dip, &circ);
1946 			while (pip = mdi_get_next_client_path(dip, NULL)) {
1947 				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1948 					continue;
1949 				}
1950 				ndi_devi_exit(dip, circ);
1951 				ndi_devi_exit(scsi_vhci_dip, circ1);
1952 				NDBG12(("detach failed because of "
1953 				    "outstanding path info"));
1954 				return (DDI_FAILURE);
1955 			}
1956 			ndi_devi_exit(dip, circ);
1957 			ndi_devi_exit(scsi_vhci_dip, circ1);
1958 			(void) mdi_phci_unregister(dip, 0);
1959 		}
1960 
1961 		ddi_prop_remove_all(dip);
1962 
1963 		return (DDI_SUCCESS);
1964 	}
1965 
1966 	/* Make sure power level is D0 before accessing registers */
1967 	if (mpt->m_options & MPTSAS_OPT_PM) {
1968 		(void) pm_busy_component(dip, 0);
1969 		if (mpt->m_power_level != PM_LEVEL_D0) {
1970 			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1971 			    DDI_SUCCESS) {
1972 				mptsas_log(mpt, CE_WARN,
1973 				    "mptsas%d: Raise power request failed.",
1974 				    mpt->m_instance);
1975 				(void) pm_idle_component(dip, 0);
1976 				return (DDI_FAILURE);
1977 			}
1978 		}
1979 	}
1980 
1981 	mutex_enter(&mpt->m_mutex);
1982 	MPTSAS_DISABLE_INTR(mpt);
1983 	mutex_exit(&mpt->m_mutex);
1984 	mptsas_rem_intrs(mpt);
1985 	ddi_taskq_destroy(mpt->m_event_taskq);
1986 	ddi_taskq_destroy(mpt->m_dr_taskq);
1987 
1988 	if (mpt->m_doneq_thread_n) {
1989 		mutex_enter(&mpt->m_doneq_mutex);
1990 		doneq_thread_num = mpt->m_doneq_thread_n;
1991 		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1992 			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1993 			mpt->m_doneq_thread_id[i].flag &=
1994 			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1995 			cv_signal(&mpt->m_doneq_thread_id[i].cv);
1996 			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1997 		}
1998 		while (mpt->m_doneq_thread_n) {
1999 			cv_wait(&mpt->m_doneq_thread_cv,
2000 			    &mpt->m_doneq_mutex);
2001 		}
2002 		for (i = 0;  i < doneq_thread_num; i++) {
2003 			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
2004 			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
2005 		}
2006 		kmem_free(mpt->m_doneq_thread_id,
2007 		    sizeof (mptsas_doneq_thread_list_t)
2008 		    * doneq_thread_num);
2009 		mutex_exit(&mpt->m_doneq_mutex);
2010 		cv_destroy(&mpt->m_doneq_thread_cv);
2011 		mutex_destroy(&mpt->m_doneq_mutex);
2012 	}
2013 
2014 	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
2015 
2016 	/*
2017 	 * Remove device instance from the global linked list
2018 	 */
2019 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
2020 	if (mptsas_head == mpt) {
2021 		m = mptsas_head = mpt->m_next;
2022 	} else {
2023 		for (m = mptsas_head; m != NULL; m = m->m_next) {
2024 			if (m->m_next == mpt) {
2025 				m->m_next = mpt->m_next;
2026 				break;
2027 			}
2028 		}
2029 		if (m == NULL) {
2030 			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2031 		}
2032 	}
2033 
2034 	if (mptsas_tail == mpt) {
2035 		mptsas_tail = m;
2036 	}
2037 	rw_exit(&mptsas_global_rwlock);
2038 
2039 	/*
2040 	 * Cancel timeout threads for this mpt
2041 	 */
2042 	mutex_enter(&mpt->m_mutex);
2043 	if (mpt->m_quiesce_timeid) {
2044 		timeout_id_t tid = mpt->m_quiesce_timeid;
2045 		mpt->m_quiesce_timeid = 0;
2046 		mutex_exit(&mpt->m_mutex);
2047 		(void) untimeout(tid);
2048 		mutex_enter(&mpt->m_mutex);
2049 	}
2050 
2051 	if (mpt->m_restart_cmd_timeid) {
2052 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
2053 		mpt->m_restart_cmd_timeid = 0;
2054 		mutex_exit(&mpt->m_mutex);
2055 		(void) untimeout(tid);
2056 		mutex_enter(&mpt->m_mutex);
2057 	}
2058 
2059 	if (mpt->m_pm_timeid != 0) {
2060 		timeout_id_t tid = mpt->m_pm_timeid;
2061 		mpt->m_pm_timeid = 0;
2062 		mutex_exit(&mpt->m_mutex);
2063 		(void) untimeout(tid);
2064 		/*
2065 		 * Report idle status for last ioctl since
2066 		 * calls to pm_busy_component(9F) are stacked.
2067 		 */
2068 		(void) pm_idle_component(mpt->m_dip, 0);
2069 		mutex_enter(&mpt->m_mutex);
2070 	}
2071 	mutex_exit(&mpt->m_mutex);
2072 
2073 	/*
2074 	 * last mpt? ... if active, CANCEL watch threads.
2075 	 */
2076 	mutex_enter(&mptsas_global_mutex);
2077 	if (mptsas_head == NULL) {
2078 		timeout_id_t tid;
2079 		/*
2080 		 * Clear mptsas_timeouts_enable so that the watch thread
2081 		 * Clear mptsas_timeouts_enabled so that the watch thread
2082 		 */
2083 		mptsas_timeouts_enabled = 0;
2084 		if (mptsas_timeout_id) {
2085 			tid = mptsas_timeout_id;
2086 			mptsas_timeout_id = 0;
2087 			mutex_exit(&mptsas_global_mutex);
2088 			(void) untimeout(tid);
2089 			mutex_enter(&mptsas_global_mutex);
2090 		}
2091 		if (mptsas_reset_watch) {
2092 			tid = mptsas_reset_watch;
2093 			mptsas_reset_watch = 0;
2094 			mutex_exit(&mptsas_global_mutex);
2095 			(void) untimeout(tid);
2096 			mutex_enter(&mptsas_global_mutex);
2097 		}
2098 	}
2099 	mutex_exit(&mptsas_global_mutex);
2100 
2101 	/*
2102 	 * Delete Phy stats
2103 	 */
2104 	mptsas_destroy_phy_stats(mpt);
2105 
2106 	/*
2107 	 * Delete m_active (the active slots structure).
2108 	 */
2109 	active = mpt->m_active;
2110 	mutex_enter(&mpt->m_mutex);
2111 	mptsas_hash_uninit(&active->m_smptbl, sizeof (mptsas_smp_t));
2112 	mutex_exit(&mpt->m_mutex);
2113 
2114 	if (active) {
2115 		kmem_free(active, active->m_size);
2116 		mpt->m_active = NULL;
2117 	}
2118 
2119 	/* deallocate everything that was allocated in mptsas_attach */
2120 	mptsas_fm_fini(mpt);
2121 	kmem_cache_destroy(mpt->m_cache_frames);
2122 	kmem_cache_destroy(mpt->m_kmem_cache);
2123 
2124 	(void) scsi_hba_detach(dip);
2125 	(void) smp_hba_detach(dip);
2126 	mptsas_free_handshake_msg(mpt);
2127 	mptsas_hba_fini(mpt);
2128 	mptsas_cfg_fini(mpt);
2129 
2130 	/* Lower the power informing PM Framework */
2131 	if (mpt->m_options & MPTSAS_OPT_PM) {
2132 		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2133 			mptsas_log(mpt, CE_WARN,
2134 			    "!mptsas%d: Lower power request failed "
2135 			    "during detach, ignoring.",
2136 			    mpt->m_instance);
2137 	}
2138 
2139 	mutex_destroy(&mpt->m_tx_waitq_mutex);
2140 	mutex_destroy(&mpt->m_mutex);
2141 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2142 		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2143 	}
2144 	cv_destroy(&mpt->m_cv);
2145 	cv_destroy(&mpt->m_passthru_cv);
2146 	cv_destroy(&mpt->m_fw_cv);
2147 	cv_destroy(&mpt->m_config_cv);
2148 	cv_destroy(&mpt->m_fw_diag_cv);
2149 
2150 	pci_config_teardown(&mpt->m_config_handle);
2151 	if (mpt->m_tran) {
2152 		scsi_hba_tran_free(mpt->m_tran);
2153 		mpt->m_tran = NULL;
2154 	}
2155 
2156 	if (mpt->m_smptran) {
2157 		smp_hba_tran_free(mpt->m_smptran);
2158 		mpt->m_smptran = NULL;
2159 	}
2160 
2161 	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2162 	ddi_prop_remove_all(dip);
2163 
2164 	return (DDI_SUCCESS);
2165 }
2166 
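/*
 * Allocate and bind a single-cookie, DMA-consistent buffer used for
 * handshake (doorbell) messages; the handles and size are saved in the
 * soft state so mptsas_free_handshake_msg() can release them later.
 */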
2167 static int
2168 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2169 {
2170 	ddi_dma_attr_t		task_dma_attrs;
2171 	ddi_dma_cookie_t	tmp_dma_cookie;
2172 	size_t			alloc_len;
2173 	uint_t			ncookie;
2174 
2175 	/* allocate Task Management ddi_dma resources */
2176 	task_dma_attrs = mpt->m_msg_dma_attr;
2177 	task_dma_attrs.dma_attr_sgllen = 1;
2178 	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2179 
2180 	if (ddi_dma_alloc_handle(mpt->m_dip, &task_dma_attrs,
2181 	    DDI_DMA_SLEEP, NULL, &mpt->m_hshk_dma_hdl) != DDI_SUCCESS) {
2182 		mpt->m_hshk_dma_hdl = NULL;
2183 		return (DDI_FAILURE);
2184 	}
2185 
2186 	if (ddi_dma_mem_alloc(mpt->m_hshk_dma_hdl, alloc_size,
2187 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2188 	    &mpt->m_hshk_memp, &alloc_len, &mpt->m_hshk_acc_hdl)
2189 	    != DDI_SUCCESS) {
2190 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2191 		mpt->m_hshk_dma_hdl = NULL;
2192 		return (DDI_FAILURE);
2193 	}
2194 
2195 	if (ddi_dma_addr_bind_handle(mpt->m_hshk_dma_hdl, NULL,
2196 	    mpt->m_hshk_memp, alloc_len, (DDI_DMA_RDWR | DDI_DMA_CONSISTENT),
2197 	    DDI_DMA_SLEEP, NULL, &tmp_dma_cookie, &ncookie)
2198 	    != DDI_DMA_MAPPED) {
2199 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
2200 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2201 		mpt->m_hshk_dma_hdl = NULL;
2202 		return (DDI_FAILURE);
2203 	}
2204 	mpt->m_hshk_dma_size = alloc_size;
2205 	return (DDI_SUCCESS);
2206 }
2207 
2208 static void
2209 mptsas_free_handshake_msg(mptsas_t *mpt)
2210 {
2211 	if (mpt->m_hshk_dma_hdl != NULL) {
2212 		(void) ddi_dma_unbind_handle(mpt->m_hshk_dma_hdl);
2213 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
2214 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2215 		mpt->m_hshk_dma_hdl = NULL;
2216 		mpt->m_hshk_dma_size = 0;
2217 	}
2218 }
2219 
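/*
 * power(9E) entry point.  Transitions the HBA between PM_LEVEL_D0 and
 * PM_LEVEL_D3.  On power-up, wait up to 30 seconds for the IOC to leave
 * the reset state and attempt a hard reset if it is still not operational.
 */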
2220 static int
2221 mptsas_power(dev_info_t *dip, int component, int level)
2222 {
2223 #ifndef __lock_lint
2224 	_NOTE(ARGUNUSED(component))
2225 #endif
2226 	mptsas_t	*mpt;
2227 	int		rval = DDI_SUCCESS;
2228 	int		polls = 0;
2229 	uint32_t	ioc_status;
2230 
2231 	if (scsi_hba_iport_unit_address(dip) != 0)
2232 		return (DDI_SUCCESS);
2233 
2234 	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
2235 	if (mpt == NULL) {
2236 		return (DDI_FAILURE);
2237 	}
2238 
2239 	mutex_enter(&mpt->m_mutex);
2240 
2241 	/*
2242 	 * If the device is busy, don't lower its power level
2243 	 */
2244 	if (mpt->m_busy && (mpt->m_power_level > level)) {
2245 		mutex_exit(&mpt->m_mutex);
2246 		return (DDI_FAILURE);
2247 	}
2248 
2249 	switch (level) {
2250 	case PM_LEVEL_D0:
2251 		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2252 		MPTSAS_POWER_ON(mpt);
2253 		/*
2254 		 * Wait up to 30 seconds for IOC to come out of reset.
2255 		 */
2256 		while (((ioc_status = ddi_get32(mpt->m_datap,
2257 		    &mpt->m_reg->Doorbell)) &
2258 		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2259 			if (polls++ > 3000) {
2260 				break;
2261 			}
2262 			delay(drv_usectohz(10000));
2263 		}
2264 		/*
2265 		 * If IOC is not in operational state, try to hard reset it.
2266 		 */
2267 		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2268 		    MPI2_IOC_STATE_OPERATIONAL) {
2269 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2270 				mptsas_log(mpt, CE_WARN,
2271 				    "mptsas_power: hard reset failed");
2272 				mutex_exit(&mpt->m_mutex);
2273 				return (DDI_FAILURE);
2274 			}
2275 		}
2276 		mpt->m_power_level = PM_LEVEL_D0;
2277 		break;
2278 	case PM_LEVEL_D3:
2279 		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2280 		MPTSAS_POWER_OFF(mpt);
2281 		break;
2282 	default:
2283 		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2284 		    mpt->m_instance, level);
2285 		rval = DDI_FAILURE;
2286 		break;
2287 	}
2288 	mutex_exit(&mpt->m_mutex);
2289 	return (rval);
2290 }
2291 
2292 /*
2293  * Initialize configuration space and figure out which
2294  * chip and revision of the chip the mpt driver is using.
2295  */
2296 int
2297 mptsas_config_space_init(mptsas_t *mpt)
2298 {
2299 	ushort_t	caps_ptr, cap, cap_count;
2300 
2301 	NDBG0(("mptsas_config_space_init"));
2302 
2303 	mptsas_setup_cmd_reg(mpt);
2304 
2305 	/*
2306 	 * Get the chip device id:
2307 	 */
2308 	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2309 
2310 	/*
2311 	 * Save the revision.
2312 	 */
2313 	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2314 
2315 	/*
2316 	 * Save the SubSystem Vendor and Device IDs
2317 	 */
2318 	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2319 	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2320 
2321 	/*
2322 	 * Set the latency timer to 0x40 as specified by the upa -> pci
2323 	 * bridge chip design team.  This may be done by the sparc pci
2324 	 * bus nexus driver, but the driver should make sure the latency
2325 	 * timer is correct for performance reasons.
2326 	 */
2327 	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2328 	    MPTSAS_LATENCY_TIMER);
2329 
2330 	/*
2331 	 * Check if capabilities list is supported and if so,
2332 	 * get initial capabilities pointer and clear bits 0,1.
2333 	 */
2334 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
2335 	    & PCI_STAT_CAP) {
2336 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
2337 		    PCI_CONF_CAP_PTR), 4);
2338 	} else {
2339 		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
2340 	}
2341 
2342 	/*
2343 	 * Walk capabilities if supported.
2344 	 */
2345 	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
2346 
2347 		/*
2348 		 * Check that we haven't exceeded the maximum number of
2349 		 * capabilities and that the pointer is in a valid range.
2350 		 */
2351 		if (++cap_count > 48) {
2352 			mptsas_log(mpt, CE_WARN,
2353 			    "too many device capabilities.\n");
2354 			return (FALSE);
2355 		}
2356 		if (caps_ptr < 64) {
2357 			mptsas_log(mpt, CE_WARN,
2358 			    "capabilities pointer 0x%x out of range.\n",
2359 			    caps_ptr);
2360 			return (FALSE);
2361 		}
2362 
2363 		/*
2364 		 * Get next capability and check that it is valid.
2365 		 * For now, we only support power management.
2366 		 */
2367 		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
2368 		switch (cap) {
2369 			case PCI_CAP_ID_PM:
2370 				mptsas_log(mpt, CE_NOTE,
2371 				    "?mptsas%d supports power management.\n",
2372 				    mpt->m_instance);
2373 				mpt->m_options |= MPTSAS_OPT_PM;
2374 
2375 				/* Save PMCSR offset */
2376 				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
2377 				break;
2378 
2379 			/*
2380 			 * 0x5 is Message signaled interrupts and 0x7
2381 			 * is pci-x capable.  Both are unsupported for now
2382 			 * but supported by the 1030 chip so we don't
2383 			 * need to keep printing out the notice.
2384 			 * 0x10 is PCI-E support (1064E/1068E)
2385 			 * 0x11 is MSIX supported by the 1064/1068
2386 			 */
2387 			case 0x5:
2388 			case 0x7:
2389 			case 0x10:
2390 			case 0x11:
2391 				break;
2392 			default:
2393 				mptsas_log(mpt, CE_NOTE,
2394 				    "?mptsas%d unrecognized capability "
2395 				    "0x%x.\n", mpt->m_instance, cap);
2396 			break;
2397 		}
2398 
2399 		/*
2400 		 * Get next capabilities pointer and clear bits 0,1.
2401 		 */
2402 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
2403 		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
2404 	}
2405 
2406 	return (TRUE);
2407 }
2408 
2409 static void
2410 mptsas_setup_cmd_reg(mptsas_t *mpt)
2411 {
2412 	ushort_t	cmdreg;
2413 
2414 	/*
2415 	 * Set the command register to the needed values.
2416 	 */
2417 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2418 	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2419 	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2420 	cmdreg &= ~PCI_COMM_IO;
2421 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2422 }
2423 
2424 static void
2425 mptsas_disable_bus_master(mptsas_t *mpt)
2426 {
2427 	ushort_t	cmdreg;
2428 
2429 	/*
2430 	 * Clear the master enable bit in the PCI command register.
2431 	 * This prevents any bus mastering activity like DMA.
2432 	 */
2433 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2434 	cmdreg &= ~PCI_COMM_ME;
2435 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2436 }
2437 
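/*
 * Allocate and bind a single-cookie DMA buffer described by dma_statep
 * (the caller fills in dma_statep->size).  On failure any partially
 * allocated resources are released and the handle pointers are cleared.
 */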
2438 int
2439 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2440 {
2441 	ddi_dma_attr_t	attrs;
2442 	uint_t		ncookie;
2443 	size_t		alloc_len;
2444 
2445 	attrs = mpt->m_io_dma_attr;
2446 	attrs.dma_attr_sgllen = 1;
2447 
2448 	ASSERT(dma_statep != NULL);
2449 
2450 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2451 	    DDI_DMA_SLEEP, NULL, &dma_statep->handle) != DDI_SUCCESS) {
2452 		mptsas_log(mpt, CE_WARN,
2453 		    "unable to allocate dma handle.");
2454 		return (DDI_FAILURE);
2455 	}
2456 
2457 	if (ddi_dma_mem_alloc(dma_statep->handle, dma_statep->size,
2458 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2459 	    &dma_statep->memp, &alloc_len, &dma_statep->accessp) !=
2460 	    DDI_SUCCESS) {
2461 		ddi_dma_free_handle(&dma_statep->handle);
2462 		dma_statep->handle = NULL;
2463 		mptsas_log(mpt, CE_WARN,
2464 		    "unable to allocate memory for dma xfer.");
2465 		return (DDI_FAILURE);
2466 	}
2467 
2468 	if (ddi_dma_addr_bind_handle(dma_statep->handle, NULL, dma_statep->memp,
2469 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2470 	    NULL, &dma_statep->cookie, &ncookie) != DDI_DMA_MAPPED) {
2471 		ddi_dma_mem_free(&dma_statep->accessp);
2472 		dma_statep->accessp = NULL;
2473 		ddi_dma_free_handle(&dma_statep->handle);
2474 		dma_statep->handle = NULL;
2475 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2476 		return (DDI_FAILURE);
2477 	}
2478 	return (DDI_SUCCESS);
2479 }
2480 
2481 void
2482 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2483 {
2484 	ASSERT(dma_statep != NULL);
2485 	if (dma_statep->handle != NULL) {
2486 		(void) ddi_dma_unbind_handle(dma_statep->handle);
2487 		(void) ddi_dma_mem_free(&dma_statep->accessp);
2488 		ddi_dma_free_handle(&dma_statep->handle);
2489 	}
2490 }
2491 
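/*
 * Allocate a temporary DMA-consistent buffer of the given size, call the
 * supplied callback with the mapped memory and access handle, check the
 * DMA and access handles for faults, then tear the buffer down again.
 */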
2492 int
2493 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2494 {
2495 	ddi_dma_attr_t		attrs;
2496 	ddi_dma_handle_t	dma_handle;
2497 	caddr_t			memp;
2498 	uint_t			ncookie;
2499 	ddi_dma_cookie_t	cookie;
2500 	ddi_acc_handle_t	accessp;
2501 	size_t			alloc_len;
2502 	int			rval;
2503 
2504 	ASSERT(mutex_owned(&mpt->m_mutex));
2505 
2506 	attrs = mpt->m_msg_dma_attr;
2507 	attrs.dma_attr_sgllen = 1;
2508 	attrs.dma_attr_granular = size;
2509 
2510 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2511 	    DDI_DMA_SLEEP, NULL, &dma_handle) != DDI_SUCCESS) {
2512 		mptsas_log(mpt, CE_WARN,
2513 		    "unable to allocate dma handle.");
2514 		return (DDI_FAILURE);
2515 	}
2516 
2517 	if (ddi_dma_mem_alloc(dma_handle, size,
2518 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2519 	    &memp, &alloc_len, &accessp) != DDI_SUCCESS) {
2520 		ddi_dma_free_handle(&dma_handle);
2521 		mptsas_log(mpt, CE_WARN,
2522 		    "unable to allocate request structure.");
2523 		return (DDI_FAILURE);
2524 	}
2525 
2526 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, memp,
2527 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2528 	    NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2529 		(void) ddi_dma_mem_free(&accessp);
2530 		ddi_dma_free_handle(&dma_handle);
2531 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2532 		return (DDI_FAILURE);
2533 	}
2534 
2535 	rval = (*callback) (mpt, memp, var, accessp);
2536 
2537 	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2538 	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2539 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2540 		rval = DDI_FAILURE;
2541 	}
2542 
2543 	if (dma_handle != NULL) {
2544 		(void) ddi_dma_unbind_handle(dma_handle);
2545 		(void) ddi_dma_mem_free(&accessp);
2546 		ddi_dma_free_handle(&dma_handle);
2547 	}
2548 
2549 	return (rval);
2550 
2551 }
2552 
2553 static int
2554 mptsas_alloc_request_frames(mptsas_t *mpt)
2555 {
2556 	ddi_dma_attr_t		frame_dma_attrs;
2557 	caddr_t			memp;
2558 	uint_t			ncookie;
2559 	ddi_dma_cookie_t	cookie;
2560 	size_t			alloc_len;
2561 	size_t			mem_size;
2562 
2563 	/*
2564 	 * The size of the request frame pool is:
2565 	 *   Number of Request Frames * Request Frame Size
2566 	 */
2567 	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2568 
2569 	/*
2570 	 * set the DMA attributes.  System Request Message Frames must be
2571 	 * aligned on a 16-byte boundary.
2572 	 */
2573 	frame_dma_attrs = mpt->m_msg_dma_attr;
2574 	frame_dma_attrs.dma_attr_align = 16;
2575 	frame_dma_attrs.dma_attr_sgllen = 1;
2576 
2577 	/*
2578 	 * allocate the request frame pool.
2579 	 */
2580 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2581 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_req_frame_hdl) != DDI_SUCCESS) {
2582 		mptsas_log(mpt, CE_WARN,
2583 		    "Unable to allocate dma handle.");
2584 		return (DDI_FAILURE);
2585 	}
2586 
2587 	if (ddi_dma_mem_alloc(mpt->m_dma_req_frame_hdl,
2588 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2589 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_req_frame_hdl)
2590 	    != DDI_SUCCESS) {
2591 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2592 		mpt->m_dma_req_frame_hdl = NULL;
2593 		mptsas_log(mpt, CE_WARN,
2594 		    "Unable to allocate request frames.");
2595 		return (DDI_FAILURE);
2596 	}
2597 
2598 	if (ddi_dma_addr_bind_handle(mpt->m_dma_req_frame_hdl, NULL,
2599 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2600 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2601 		(void) ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
2602 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2603 		mpt->m_dma_req_frame_hdl = NULL;
2604 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2605 		return (DDI_FAILURE);
2606 	}
2607 
2608 	/*
2609 	 * Store the request frame memory address.  This chip uses this
2610 	 * address to dma to and from the driver's frame.  The second
2611 	 * address is the address mpt uses to fill in the frame.
2612 	 */
2613 	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2614 	mpt->m_req_frame = memp;
2615 
2616 	/*
2617 	 * Clear the request frame pool.
2618 	 */
2619 	bzero(mpt->m_req_frame, alloc_len);
2620 
2621 	return (DDI_SUCCESS);
2622 }
2623 
2624 static int
2625 mptsas_alloc_reply_frames(mptsas_t *mpt)
2626 {
2627 	ddi_dma_attr_t		frame_dma_attrs;
2628 	caddr_t			memp;
2629 	uint_t			ncookie;
2630 	ddi_dma_cookie_t	cookie;
2631 	size_t			alloc_len;
2632 	size_t			mem_size;
2633 
2634 	/*
2635 	 * The size of the reply frame pool is:
2636 	 *   Number of Reply Frames * Reply Frame Size
2637 	 */
2638 	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2639 
2640 	/*
2641 	 * set the DMA attributes.  System Reply Message Frames must be
2642 	 * aligned on a 4-byte boundary.  This is the default.
2643 	 */
2644 	frame_dma_attrs = mpt->m_msg_dma_attr;
2645 	frame_dma_attrs.dma_attr_sgllen = 1;
2646 
2647 	/*
2648 	 * allocate the reply frame pool
2649 	 */
2650 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2651 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_reply_frame_hdl) != DDI_SUCCESS) {
2652 		mptsas_log(mpt, CE_WARN,
2653 		    "Unable to allocate dma handle.");
2654 		return (DDI_FAILURE);
2655 	}
2656 
2657 	if (ddi_dma_mem_alloc(mpt->m_dma_reply_frame_hdl,
2658 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2659 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_reply_frame_hdl)
2660 	    != DDI_SUCCESS) {
2661 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2662 		mpt->m_dma_reply_frame_hdl = NULL;
2663 		mptsas_log(mpt, CE_WARN,
2664 		    "Unable to allocate reply frames.");
2665 		return (DDI_FAILURE);
2666 	}
2667 
2668 	if (ddi_dma_addr_bind_handle(mpt->m_dma_reply_frame_hdl, NULL,
2669 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2670 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2671 		(void) ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
2672 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2673 		mpt->m_dma_reply_frame_hdl = NULL;
2674 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2675 		return (DDI_FAILURE);
2676 	}
2677 
2678 	/*
2679 	 * Store the reply frame memory address.  This chip uses this
2680 	 * address to dma to and from the driver's frame.  The second
2681 	 * address is the address mpt uses to process the frame.
2682 	 */
2683 	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2684 	mpt->m_reply_frame = memp;
2685 
2686 	/*
2687 	 * Clear the reply frame pool.
2688 	 */
2689 	bzero(mpt->m_reply_frame, alloc_len);
2690 
2691 	return (DDI_SUCCESS);
2692 }
2693 
2694 static int
2695 mptsas_alloc_free_queue(mptsas_t *mpt)
2696 {
2697 	ddi_dma_attr_t		frame_dma_attrs;
2698 	caddr_t			memp;
2699 	uint_t			ncookie;
2700 	ddi_dma_cookie_t	cookie;
2701 	size_t			alloc_len;
2702 	size_t			mem_size;
2703 
2704 	/*
2705 	 * The reply free queue size is:
2706 	 *   Reply Free Queue Depth * 4
2707 	 * The "4" is the size of one 32 bit address (low part of 64-bit
2708 	 *   address)
2709 	 */
2710 	mem_size = mpt->m_free_queue_depth * 4;
2711 
2712 	/*
2713 	 * set the DMA attributes.  The Reply Free Queue must be aligned on a
2714 	 * 16-byte boundary.
2715 	 */
2716 	frame_dma_attrs = mpt->m_msg_dma_attr;
2717 	frame_dma_attrs.dma_attr_align = 16;
2718 	frame_dma_attrs.dma_attr_sgllen = 1;
2719 
2720 	/*
2721 	 * allocate the reply free queue
2722 	 */
2723 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2724 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_free_queue_hdl) != DDI_SUCCESS) {
2725 		mptsas_log(mpt, CE_WARN,
2726 		    "Unable to allocate dma handle.");
2727 		return (DDI_FAILURE);
2728 	}
2729 
2730 	if (ddi_dma_mem_alloc(mpt->m_dma_free_queue_hdl,
2731 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2732 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_free_queue_hdl)
2733 	    != DDI_SUCCESS) {
2734 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2735 		mpt->m_dma_free_queue_hdl = NULL;
2736 		mptsas_log(mpt, CE_WARN,
2737 		    "Unable to allocate free queue.");
2738 		return (DDI_FAILURE);
2739 	}
2740 
2741 	if (ddi_dma_addr_bind_handle(mpt->m_dma_free_queue_hdl, NULL,
2742 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2743 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2744 		(void) ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
2745 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2746 		mpt->m_dma_free_queue_hdl = NULL;
2747 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2748 		return (DDI_FAILURE);
2749 	}
2750 
2751 	/*
2752 	 * Store the reply free queue memory address.  This chip uses this
2753 	 * address to read from the reply free queue.  The second address
2754 	 * is the address mpt uses to manage the queue.
2755 	 */
2756 	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2757 	mpt->m_free_queue = memp;
2758 
2759 	/*
2760 	 * Clear the reply free queue memory.
2761 	 */
2762 	bzero(mpt->m_free_queue, alloc_len);
2763 
2764 	return (DDI_SUCCESS);
2765 }
2766 
2767 static int
2768 mptsas_alloc_post_queue(mptsas_t *mpt)
2769 {
2770 	ddi_dma_attr_t		frame_dma_attrs;
2771 	caddr_t			memp;
2772 	uint_t			ncookie;
2773 	ddi_dma_cookie_t	cookie;
2774 	size_t			alloc_len;
2775 	size_t			mem_size;
2776 
2777 	/*
2778 	 * The reply descriptor post queue size is:
2779 	 *   Reply Descriptor Post Queue Depth * 8
2780 	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2781 	 */
2782 	mem_size = mpt->m_post_queue_depth * 8;
2783 
2784 	/*
2785 	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
2786 	 * aligned on a 16-byte boundary.
2787 	 */
2788 	frame_dma_attrs = mpt->m_msg_dma_attr;
2789 	frame_dma_attrs.dma_attr_align = 16;
2790 	frame_dma_attrs.dma_attr_sgllen = 1;
2791 
2792 	/*
2793 	 * allocate the reply post queue
2794 	 */
2795 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2796 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_post_queue_hdl) != DDI_SUCCESS) {
2797 		mptsas_log(mpt, CE_WARN,
2798 		    "Unable to allocate dma handle.");
2799 		return (DDI_FAILURE);
2800 	}
2801 
2802 	if (ddi_dma_mem_alloc(mpt->m_dma_post_queue_hdl,
2803 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2804 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_post_queue_hdl)
2805 	    != DDI_SUCCESS) {
2806 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2807 		mpt->m_dma_post_queue_hdl = NULL;
2808 		mptsas_log(mpt, CE_WARN,
2809 		    "Unable to allocate post queue.");
2810 		return (DDI_FAILURE);
2811 	}
2812 
2813 	if (ddi_dma_addr_bind_handle(mpt->m_dma_post_queue_hdl, NULL,
2814 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2815 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2816 		(void) ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
2817 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2818 		mpt->m_dma_post_queue_hdl = NULL;
2819 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2820 		return (DDI_FAILURE);
2821 	}
2822 
2823 	/*
2824 	 * Store the reply descriptor post queue memory address.  This chip
2825 	 * uses this address to write to the reply descriptor post queue.  The
2826 	 * second address is the address mpt uses to manage the queue.
2827 	 */
2828 	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2829 	mpt->m_post_queue = memp;
2830 
2831 	/*
2832 	 * Clear the reply post queue memory.
2833 	 */
2834 	bzero(mpt->m_post_queue, alloc_len);
2835 
2836 	return (DDI_SUCCESS);
2837 }
2838 
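/*
 * Make sure the command has an extra frame from the m_cache_frames kmem
 * cache attached, for scatter/gather lists that do not fit in the main
 * request frame.  Allocation is KM_NOSLEEP, so this can fail.
 */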
2839 static int
2840 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2841 {
2842 	mptsas_cache_frames_t	*frames = NULL;
2843 	if (cmd->cmd_extra_frames == NULL) {
2844 		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2845 		if (frames == NULL) {
2846 			return (DDI_FAILURE);
2847 		}
2848 		cmd->cmd_extra_frames = frames;
2849 	}
2850 	return (DDI_SUCCESS);
2851 }
2852 
2853 static void
2854 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2855 {
2856 	if (cmd->cmd_extra_frames) {
2857 		kmem_cache_free(mpt->m_cache_frames,
2858 		    (void *)cmd->cmd_extra_frames);
2859 		cmd->cmd_extra_frames = NULL;
2860 	}
2861 }
2862 
2863 static void
2864 mptsas_cfg_fini(mptsas_t *mpt)
2865 {
2866 	NDBG0(("mptsas_cfg_fini"));
2867 	ddi_regs_map_free(&mpt->m_datap);
2868 }
2869 
2870 static void
2871 mptsas_hba_fini(mptsas_t *mpt)
2872 {
2873 	NDBG0(("mptsas_hba_fini"));
2874 
2875 	/*
2876 	 * Disable any bus mastering ability (i.e: DMA) prior to freeing any
2877 	 * allocated DMA resources.
2878 	 */
2879 	if (mpt->m_config_handle != NULL)
2880 		mptsas_disable_bus_master(mpt);
2881 
2882 	/*
2883 	 * Free up any allocated memory
2884 	 */
2885 	if (mpt->m_dma_req_frame_hdl != NULL) {
2886 		(void) ddi_dma_unbind_handle(mpt->m_dma_req_frame_hdl);
2887 		ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
2888 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2889 		mpt->m_dma_req_frame_hdl = NULL;
2890 	}
2891 
2892 	if (mpt->m_dma_reply_frame_hdl != NULL) {
2893 		(void) ddi_dma_unbind_handle(mpt->m_dma_reply_frame_hdl);
2894 		ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
2895 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2896 		mpt->m_dma_reply_frame_hdl = NULL;
2897 	}
2898 
2899 	if (mpt->m_dma_free_queue_hdl != NULL) {
2900 		(void) ddi_dma_unbind_handle(mpt->m_dma_free_queue_hdl);
2901 		ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
2902 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2903 		mpt->m_dma_free_queue_hdl = NULL;
2904 	}
2905 
2906 	if (mpt->m_dma_post_queue_hdl != NULL) {
2907 		(void) ddi_dma_unbind_handle(mpt->m_dma_post_queue_hdl);
2908 		ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
2909 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2910 		mpt->m_dma_post_queue_hdl = NULL;
2911 	}
2912 
2913 	if (mpt->m_replyh_args != NULL) {
2914 		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2915 		    * mpt->m_max_replies);
2916 	}
2917 }
2918 
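/*
 * Build the unit address string for a child node: "pPHY,LUN" when the
 * child is addressed by a sata-phy property, otherwise "wWWN,LUN" based
 * on the target-port property.
 */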
2919 static int
2920 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2921 {
2922 	int		lun = 0;
2923 	char		*sas_wwn = NULL;
2924 	int		phynum = -1;
2925 	int		reallen = 0;
2926 
2927 	/* Get the target num */
2928 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2929 	    LUN_PROP, 0);
2930 
2931 	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2932 	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2933 		/*
2934 		 * Stick in the address of the form "pPHY,LUN"
2935 		 */
2936 		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2937 	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2938 	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2939 	    == DDI_PROP_SUCCESS) {
2940 		/*
2941 		 * Stick in the address of the form "wWWN,LUN"
2942 		 */
2943 		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2944 		ddi_prop_free(sas_wwn);
2945 	} else {
2946 		return (DDI_FAILURE);
2947 	}
2948 
2949 	ASSERT(reallen < len);
2950 	if (reallen >= len) {
2951 		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2952 		    "length too small, it needs to be %d bytes", reallen + 1);
2953 	}
2954 	return (DDI_SUCCESS);
2955 }
2956 
2957 /*
2958  * tran_tgt_init(9E) - target device instance initialization
2959  */
2960 static int
2961 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2962     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2963 {
2964 #ifndef __lock_lint
2965 	_NOTE(ARGUNUSED(hba_tran))
2966 #endif
2967 
2968 	/*
2969 	 * At this point, the scsi_device structure already exists
2970 	 * and has been initialized.
2971 	 *
2972 	 * Use this function to allocate target-private data structures,
2973 	 * if needed by this HBA.  Add revised flow-control and queue
2974 	 * properties for child here, if desired and if you can tell they
2975 	 * support tagged queueing by now.
2976 	 */
2977 	mptsas_t		*mpt;
2978 	int			lun = sd->sd_address.a_lun;
2979 	mdi_pathinfo_t		*pip = NULL;
2980 	mptsas_tgt_private_t	*tgt_private = NULL;
2981 	mptsas_target_t		*ptgt = NULL;
2982 	char			*psas_wwn = NULL;
2983 	int			phymask = 0;
2984 	uint64_t		sas_wwn = 0;
2985 	mpt = SDEV2MPT(sd);
2986 
2987 	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2988 
2989 	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2990 	    (void *)hba_dip, (void *)tgt_dip, lun));
2991 
2992 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2993 		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
2994 		ddi_set_name_addr(tgt_dip, NULL);
2995 		return (DDI_FAILURE);
2996 	}
2997 	/*
2998 	 * phymask is 0 means the virtual port for RAID
2999 	 */
3000 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3001 	    "phymask", 0);
3002 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3003 		if ((pip = (void *)(sd->sd_private)) == NULL) {
3004 			/*
3005 			 * Very bad news if this occurs. Somehow scsi_vhci has
3006 			 * lost the pathinfo node for this target.
3007 			 */
3008 			return (DDI_NOT_WELL_FORMED);
3009 		}
3010 
3011 		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3012 		    DDI_PROP_SUCCESS) {
3013 			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
3014 			return (DDI_FAILURE);
3015 		}
3016 
3017 		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3018 		    &psas_wwn) == MDI_SUCCESS) {
3019 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3020 				sas_wwn = 0;
3021 			}
3022 			(void) mdi_prop_free(psas_wwn);
3023 		}
3024 	} else {
3025 		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3026 		    DDI_PROP_DONTPASS, LUN_PROP, 0);
3027 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3028 		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3029 		    DDI_PROP_SUCCESS) {
3030 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3031 				sas_wwn = 0;
3032 			}
3033 			ddi_prop_free(psas_wwn);
3034 		} else {
3035 			sas_wwn = 0;
3036 		}
3037 	}
3038 	ASSERT((sas_wwn != 0) || (phymask != 0));
3039 	mutex_enter(&mpt->m_mutex);
3040 	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
3041 	mutex_exit(&mpt->m_mutex);
3042 	if (ptgt == NULL) {
3043 		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
3044 		    "is gone already! phymask:%x, saswwn %"PRIx64, phymask,
3045 		    sas_wwn);
3046 		return (DDI_FAILURE);
3047 	}
3048 	if (hba_tran->tran_tgt_private == NULL) {
3049 		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3050 		    KM_SLEEP);
3051 		tgt_private->t_lun = lun;
3052 		tgt_private->t_private = ptgt;
3053 		hba_tran->tran_tgt_private = tgt_private;
3054 	}
3055 
3056 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3057 		return (DDI_SUCCESS);
3058 	}
3059 	mutex_enter(&mpt->m_mutex);
3060 
3061 	if (ptgt->m_deviceinfo &
3062 	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3063 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3064 		uchar_t *inq89 = NULL;
3065 		int inq89_len = 0x238;
3066 		int reallen = 0;
3067 		int rval = 0;
3068 		struct sata_id *sid = NULL;
3069 		char model[SATA_ID_MODEL_LEN + 1];
3070 		char fw[SATA_ID_FW_LEN + 1];
3071 		char *vid, *pid;
3072 		int i;
3073 
3074 		mutex_exit(&mpt->m_mutex);
3075 		/*
3076 		 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a,
3077 		 * chapter 12.4.2, VPD page 89h includes 512 bytes of ATA
3078 		 * IDENTIFY DEVICE or ATA IDENTIFY PACKET DEVICE data.
3079 		 */
3080 		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3081 		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3082 		    inq89, inq89_len, &reallen, 1);
3083 
3084 		if (rval != 0) {
3085 			if (inq89 != NULL) {
3086 				kmem_free(inq89, inq89_len);
3087 			}
3088 
3089 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
3090 			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3091 			return (DDI_SUCCESS);
3092 		}
3093 		sid = (void *)(&inq89[60]);
3094 
3095 		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3096 		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3097 
3098 		model[SATA_ID_MODEL_LEN] = 0;
3099 		fw[SATA_ID_FW_LEN] = 0;
3100 
3101 		/*
3102 		 * split model into vid/pid
3103 		 */
3104 		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
3105 			if ((*pid == ' ') || (*pid == '\t'))
3106 				break;
3107 		if (i < SATA_ID_MODEL_LEN) {
3108 			vid = model;
3109 			/*
3110 			 * terminate vid, establish pid
3111 			 */
3112 			*pid++ = 0;
3113 		} else {
3114 			/*
3115 			 * vid will stay "ATA     "; this matches the
3116 			 * sata framework's behavior.
3117 			 */
3118 			vid = NULL;
3119 			/*
3120 			 * model is all pid
3121 			 */
3122 			pid = model;
3123 		}
3124 
3125 		/*
3126 		 * override SCSA "inquiry-*" properties
3127 		 */
3128 		if (vid)
3129 			(void) scsi_device_prop_update_inqstring(sd,
3130 			    INQUIRY_VENDOR_ID, vid, strlen(vid));
3131 		if (pid)
3132 			(void) scsi_device_prop_update_inqstring(sd,
3133 			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
3134 		(void) scsi_device_prop_update_inqstring(sd,
3135 		    INQUIRY_REVISION_ID, fw, strlen(fw));
3136 
3137 		if (inq89 != NULL) {
3138 			kmem_free(inq89, inq89_len);
3139 		}
3140 	} else {
3141 		mutex_exit(&mpt->m_mutex);
3142 	}
3143 
3144 	return (DDI_SUCCESS);
3145 }
3146 /*
3147  * tran_tgt_free(9E) - target device instance deallocation
3148  */
3149 static void
3150 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3151     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3152 {
3153 #ifndef __lock_lint
3154 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3155 #endif
3156 
3157 	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;
3158 
3159 	if (tgt_private != NULL) {
3160 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3161 		hba_tran->tran_tgt_private = NULL;
3162 	}
3163 }
3164 
3165 /*
3166  * scsi_pkt handling
3167  *
3168  * Visible to the external world via the transport structure.
3169  */
3170 
3171 /*
3172  * Notes:
3173  *	- transport the command to the addressed SCSI target/lun device
3174  *	- normal operation is to schedule the command to be transported,
3175  *	  and return TRAN_ACCEPT if this is successful.
3176  *	- if NO_INTR, tran_start must poll device for command completion
3177  */
3178 static int
3179 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
3180 {
3181 #ifndef __lock_lint
3182 	_NOTE(ARGUNUSED(ap))
3183 #endif
3184 	mptsas_t	*mpt = PKT2MPT(pkt);
3185 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3186 	int		rval;
3187 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3188 
3189 	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
3190 	ASSERT(ptgt);
3191 	if (ptgt == NULL)
3192 		return (TRAN_FATAL_ERROR);
3193 
3194 	/*
3195 	 * prepare the pkt before taking mutex.
3196 	 */
3197 	rval = mptsas_prepare_pkt(cmd);
3198 	if (rval != TRAN_ACCEPT) {
3199 		return (rval);
3200 	}
3201 
3202 	/*
3203 	 * Send the command to target/lun, however your HBA requires it.
3204 	 * If busy, return TRAN_BUSY; if there's some other formatting error
3205 	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
3206 	 * return of TRAN_ACCEPT.
3207 	 *
3208 	 * Remember that access to shared resources, including the mptsas_t
3209 	 * data structure and the HBA hardware registers, must be protected
3210 	 * with mutexes, here and everywhere.
3211 	 *
3212 	 * Also remember that at interrupt time, you'll get an argument
3213 	 * to the interrupt handler which is a pointer to your mptsas_t
3214 	 * structure; you'll have to remember which commands are outstanding
3215 	 * and which scsi_pkt is the currently-running command so the
3216 	 * interrupt handler can refer to the pkt to set completion
3217 	 * status, call the target driver back through pkt_comp, etc.
3218 	 *
3219 	 * If the instance lock is held by another thread, don't spin waiting
3220 	 * for it.  Instead, queue the cmd; the next time the instance lock
3221 	 * is not held, all queued cmds are accepted.  An extra tx_waitq is
3222 	 * introduced to protect the queue.
3223 	 *
3224 	 * Polled cmds are never queued; they are accepted as usual.
3225 	 *
3226 	 * Under the tx_waitq mutex, record whether a thread is draining
3227 	 * the tx_waitq.  An I/O requesting thread that finds the instance
3228 	 * mutex contended appends to the tx_waitq and, while holding the
3229 	 * tx_waitq mutex, sets the draining flag if it is not already set,
3230 	 * then proceeds to spin for the instance mutex.  This scheme ensures
3231 	 * that the last cmd in a burst is processed.
3232 	 *
3233 	 * We enable this feature only when the helper threads are enabled,
3234 	 * which is when we expect the load to be heavy.
3235 	 *
3236 	 * The per-instance mutex m_tx_waitq_mutex protects m_tx_waitqtail,
3237 	 * m_tx_waitq and m_tx_draining.
3238 	 */
3239 
3240 	if (mpt->m_doneq_thread_n) {
3241 		if (mutex_tryenter(&mpt->m_mutex) != 0) {
3242 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3243 			mutex_exit(&mpt->m_mutex);
3244 		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3245 			mutex_enter(&mpt->m_mutex);
3246 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3247 			mutex_exit(&mpt->m_mutex);
3248 		} else {
3249 			mutex_enter(&mpt->m_tx_waitq_mutex);
3250 			/*
3251 			 * ptgt->m_dr_flag is protected by m_mutex or
3252 			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3253 			 * is acquired.
3254 			 */
3255 			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3256 				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3257 					/*
3258 					 * The command should be allowed to
3259 					 * retry by returning TRAN_BUSY so as
3260 					 * to stall the I/Os which come from
3261 					 * scsi_vhci since the device/path is
3262 					 * in an unstable state now.
3263 					 */
3264 					mutex_exit(&mpt->m_tx_waitq_mutex);
3265 					return (TRAN_BUSY);
3266 				} else {
3267 					/*
3268 					 * The device is offline, just fail the
3269 					 * command by returning
3270 					 * TRAN_FATAL_ERROR.
3271 					 */
3272 					mutex_exit(&mpt->m_tx_waitq_mutex);
3273 					return (TRAN_FATAL_ERROR);
3274 				}
3275 			}
3276 			if (mpt->m_tx_draining) {
3277 				cmd->cmd_flags |= CFLAG_TXQ;
3278 				*mpt->m_tx_waitqtail = cmd;
3279 				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3280 				mutex_exit(&mpt->m_tx_waitq_mutex);
3281 			} else { /* drain the queue */
3282 				mpt->m_tx_draining = 1;
3283 				mutex_exit(&mpt->m_tx_waitq_mutex);
3284 				mutex_enter(&mpt->m_mutex);
3285 				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3286 				mutex_exit(&mpt->m_mutex);
3287 			}
3288 		}
3289 	} else {
3290 		mutex_enter(&mpt->m_mutex);
3291 		/*
3292 		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3293 		 * in this case, m_mutex is acquired.
3294 		 */
3295 		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3296 			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3297 				/*
3298 				 * Commands should be allowed to retry by
3299 				 * returning TRAN_BUSY to stall the I/Os
3300 				 * which come from scsi_vhci since the device/
3301 				 * path is in an unstable state now.
3302 				 */
3303 				mutex_exit(&mpt->m_mutex);
3304 				return (TRAN_BUSY);
3305 			} else {
3306 				/*
3307 				 * The device is offline, just fail the
3308 				 * command by returning TRAN_FATAL_ERROR.
3309 				 */
3310 				mutex_exit(&mpt->m_mutex);
3311 				return (TRAN_FATAL_ERROR);
3312 			}
3313 		}
3314 		rval = mptsas_accept_pkt(mpt, cmd);
3315 		mutex_exit(&mpt->m_mutex);
3316 	}
3317 
3318 	return (rval);
3319 }
3320 
3321 /*
3322  * Accept all the queued cmds(if any) before accept the current one.
3323  */
3324 static int
3325 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3326 {
3327 	int rval;
3328 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3329 
3330 	ASSERT(mutex_owned(&mpt->m_mutex));
3331 	/*
3332 	 * The call to mptsas_accept_tx_waitq() must always be performed
3333 	 * because that is where mpt->m_tx_draining is cleared.
3334 	 */
3335 	mutex_enter(&mpt->m_tx_waitq_mutex);
3336 	mptsas_accept_tx_waitq(mpt);
3337 	mutex_exit(&mpt->m_tx_waitq_mutex);
3338 	/*
3339 	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3340 	 * in this case, m_mutex is acquired.
3341 	 */
3342 	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3343 		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3344 			/*
3345 			 * The command should be allowed to retry by returning
3346 			 * TRAN_BUSY to stall the I/Os which come from
3347 			 * scsi_vhci since the device/path is in an unstable
3348 			 * state now.
3349 			 */
3350 			return (TRAN_BUSY);
3351 		} else {
3352 			/*
3353 			 * The device is offline, just fail the command by
3354 			 * returning TRAN_FATAL_ERROR.
3355 			 */
3356 			return (TRAN_FATAL_ERROR);
3357 		}
3358 	}
3359 	rval = mptsas_accept_pkt(mpt, cmd);
3360 
3361 	return (rval);
3362 }
3363 
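/*
 * Accept a prepared command: reset a drained target's throttle, fail or
 * busy the command if the target's DevHandle is invalid, then either
 * start it immediately when a slot and throttle allow it or place it on
 * the wait queue (polling for completion when FLAG_NOINTR is set).
 */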
3364 static int
3365 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3366 {
3367 	int		rval = TRAN_ACCEPT;
3368 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3369 
3370 	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3371 
3372 	ASSERT(mutex_owned(&mpt->m_mutex));
3373 
3374 	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3375 		rval = mptsas_prepare_pkt(cmd);
3376 		if (rval != TRAN_ACCEPT) {
3377 			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3378 			return (rval);
3379 		}
3380 	}
3381 
3382 	/*
3383 	 * reset the throttle if we were draining
3384 	 */
3385 	if ((ptgt->m_t_ncmds == 0) &&
3386 	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3387 		NDBG23(("reset throttle"));
3388 		ASSERT(ptgt->m_reset_delay == 0);
3389 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3390 	}
3391 
3392 	/*
3393 	 * If HBA is being reset, the DevHandles are being re-initialized,
3394 	 * which means that they could be invalid even if the target is still
3395 	 * attached.  Check if being reset and if DevHandle is being
3396 	 * re-initialized.  If this is the case, return BUSY so the I/O can be
3397 	 * retried later.
3398 	 */
3399 	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3400 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3401 		if (cmd->cmd_flags & CFLAG_TXQ) {
3402 			mptsas_doneq_add(mpt, cmd);
3403 			mptsas_doneq_empty(mpt);
3404 			return (rval);
3405 		} else {
3406 			return (TRAN_BUSY);
3407 		}
3408 	}
3409 
3410 	/*
3411 	 * If the device handle has already been invalidated, just
3412 	 * fail the command.  In theory, a command from a scsi_vhci
3413 	 * client should never arrive with an invalid devhdl, since
3414 	 * the devhdl is invalidated only after the path goes offline
3415 	 * and the target driver is not supposed to select an offlined path.
3416 	 */
3417 	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3418 		NDBG20(("rejecting command, it may be due to an invalid "
3419 		    "devhdl request."));
3420 		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3421 		if (cmd->cmd_flags & CFLAG_TXQ) {
3422 			mptsas_doneq_add(mpt, cmd);
3423 			mptsas_doneq_empty(mpt);
3424 			return (rval);
3425 		} else {
3426 			return (TRAN_FATAL_ERROR);
3427 		}
3428 	}
3429 	/*
3430 	 * The first case is the normal case.  mpt gets a command from the
3431 	 * target driver and starts it.
3432 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3433 	 * commands is m_max_requests - 2.
3434 	 */
3435 	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3436 	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3437 	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3438 	    (ptgt->m_reset_delay == 0) &&
3439 	    (ptgt->m_t_nwait == 0) &&
3440 	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3441 		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3442 			(void) mptsas_start_cmd(mpt, cmd);
3443 		} else {
3444 			mptsas_waitq_add(mpt, cmd);
3445 		}
3446 	} else {
3447 		/*
3448 		 * Add this pkt to the work queue
3449 		 */
3450 		mptsas_waitq_add(mpt, cmd);
3451 
3452 		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3453 			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3454 
3455 			/*
3456 			 * Only flush the doneq if this is not a TM
3457 			 * cmd.  For TM cmds the flushing of the
3458 			 * doneq will be done in those routines.
3459 			 */
3460 			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3461 				mptsas_doneq_empty(mpt);
3462 			}
3463 		}
3464 	}
3465 	return (rval);
3466 }
3467 
3468 int
3469 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3470 {
3471 	mptsas_slots_t	*slots;
3472 	int		slot;
3473 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3474 
3475 	ASSERT(mutex_owned(&mpt->m_mutex));
3476 	slots = mpt->m_active;
3477 
3478 	/*
3479 	 * Account for reserved TM request slot and reserved SMID of 0.
3480 	 */
3481 	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));
3482 
3483 	/*
3484 	 * m_tags is equivalent to the SMID when sending requests.  Since the
3485 	 * SMID cannot be 0, start out at one if rolling over past the size
3486 	 * of the request queue depth.  Also, don't use the last SMID, which is
3487 	 * reserved for TM requests.
3488 	 */
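	/*
	 * For example (illustrative only): if m_n_slots is 126 and m_tags
	 * is currently 126, this allocation uses slot 126 and the counter
	 * wraps back to 1, so SMID 0 and the reserved TM slot are never
	 * handed out here.
	 */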
3489 	slot = (slots->m_tags)++;
3490 	if (slots->m_tags > slots->m_n_slots) {
3491 		slots->m_tags = 1;
3492 	}
3493 
3494 alloc_tag:
3495 	/* Validate tag, should never fail. */
3496 	if (slots->m_slot[slot] == NULL) {
3497 		/*
3498 		 * Make sure SMID is not using reserved value of 0
3499 		 * and the TM request slot.
3500 		 */
3501 		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
3502 		cmd->cmd_slot = slot;
3503 		slots->m_slot[slot] = cmd;
3504 		mpt->m_ncmds++;
3505 
3506 		/*
3507 		 * Only increment the per-target ncmds if this command has a
3508 		 * target associated with it (i.e. it is not something like an
3509 		 * event acknowledgment).
3510 		 */
3511 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3512 			ptgt->m_t_ncmds++;
3513 		}
3514 		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3515 
3516 		/*
3517 		 * If the initial timeout is less than or equal to one tick,
3518 		 * bump the timeout by a tick so that the command doesn't time
3519 		 * out before its allotted time.
3520 		 */
3521 		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3522 			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3523 		}
3524 		return (TRUE);
3525 	} else {
3526 		int i;
3527 
3528 		/*
3529 		 * If slot in use, scan until a free one is found. Don't use 0
3530 		 * or final slot, which is reserved for TM requests.
3531 		 */
3532 		for (i = 0; i < slots->m_n_slots; i++) {
3533 			slot = slots->m_tags;
3534 			if (++(slots->m_tags) > slots->m_n_slots) {
3535 				slots->m_tags = 1;
3536 			}
3537 			if (slots->m_slot[slot] == NULL) {
3538 				NDBG22(("found free slot %d", slot));
3539 				goto alloc_tag;
3540 			}
3541 		}
3542 	}
3543 	return (FALSE);
3544 }
3545 
3546 /*
3547  * prepare the pkt:
3548  * the pkt may have been resubmitted or just reused so
3549  * initialize some fields and do some checks.
3550  */
3551 static int
3552 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3553 {
3554 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
3555 
3556 	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3557 
3558 	/*
3559 	 * Reinitialize some fields that need it; the packet may
3560 	 * have been resubmitted
3561 	 */
3562 	pkt->pkt_reason = CMD_CMPLT;
3563 	pkt->pkt_state = 0;
3564 	pkt->pkt_statistics = 0;
3565 	pkt->pkt_resid = 0;
3566 	cmd->cmd_age = 0;
3567 	cmd->cmd_pkt_flags = pkt->pkt_flags;
3568 
3569 	/*
3570 	 * zero status byte.
3571 	 */
3572 	*(pkt->pkt_scbp) = 0;
3573 
3574 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3575 		pkt->pkt_resid = cmd->cmd_dmacount;
3576 
3577 		/*
3578 		 * consistent packets need to be sync'ed first
3579 		 * (only for data going out)
3580 		 */
3581 		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3582 		    (cmd->cmd_flags & CFLAG_DMASEND)) {
3583 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3584 			    DDI_DMA_SYNC_FORDEV);
3585 		}
3586 	}
3587 
3588 	cmd->cmd_flags =
3589 	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3590 	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3591 
3592 	return (TRAN_ACCEPT);
3593 }
3594 
3595 /*
3596  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3597  *
3598  * One of three possibilities:
3599  *	- allocate scsi_pkt
3600  *	- allocate scsi_pkt and DMA resources
3601  *	- allocate DMA resources to an already-allocated pkt
3602  */
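/*
 * Purely illustrative sketch (not part of this driver): a target driver
 * typically reaches this entry point through scsi_init_pkt(9F), e.g.
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
 *	    SLEEP_FUNC, NULL);
 *
 * The SCSA framework then calls tran_init_pkt, i.e. this routine, with
 * the same cmdlen/statuslen/tgtlen/flags arguments.  (devp is a
 * hypothetical target-driver scsi_device pointer.)
 */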
3603 static struct scsi_pkt *
3604 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3605     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3606     int (*callback)(), caddr_t arg)
3607 {
3608 	mptsas_cmd_t		*cmd, *new_cmd;
3609 	mptsas_t		*mpt = ADDR2MPT(ap);
3610 	int			failure = 1;
3611 	uint_t			oldcookiec;
3612 	mptsas_target_t		*ptgt = NULL;
3613 	int			rval;
3614 	mptsas_tgt_private_t	*tgt_private;
3615 	int			kf;
3616 
3617 	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3618 
3619 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3620 	    tran_tgt_private;
3621 	ASSERT(tgt_private != NULL);
3622 	if (tgt_private == NULL) {
3623 		return (NULL);
3624 	}
3625 	ptgt = tgt_private->t_private;
3626 	ASSERT(ptgt != NULL);
3627 	if (ptgt == NULL)
3628 		return (NULL);
3629 	ap->a_target = ptgt->m_devhdl;
3630 	ap->a_lun = tgt_private->t_lun;
3631 
3632 	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3633 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3634 	statuslen *= 100; tgtlen *= 4;
3635 #endif
3636 	NDBG3(("mptsas_scsi_init_pkt:\n"
3637 	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3638 	    ap->a_target, (void *)pkt, (void *)bp,
3639 	    cmdlen, statuslen, tgtlen, flags));
3640 
3641 	/*
3642 	 * Allocate the new packet.
3643 	 */
3644 	if (pkt == NULL) {
3645 		ddi_dma_handle_t	save_dma_handle;
3646 		ddi_dma_handle_t	save_arq_dma_handle;
3647 		struct buf		*save_arq_bp;
3648 		ddi_dma_cookie_t	save_arqcookie;
3649 
3650 		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3651 
3652 		if (cmd) {
3653 			save_dma_handle = cmd->cmd_dmahandle;
3654 			save_arq_dma_handle = cmd->cmd_arqhandle;
3655 			save_arq_bp = cmd->cmd_arq_buf;
3656 			save_arqcookie = cmd->cmd_arqcookie;
3657 			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3658 			cmd->cmd_dmahandle = save_dma_handle;
3659 			cmd->cmd_arqhandle = save_arq_dma_handle;
3660 			cmd->cmd_arq_buf = save_arq_bp;
3661 			cmd->cmd_arqcookie = save_arqcookie;
3662 
3663 			pkt = (void *)((uchar_t *)cmd +
3664 			    sizeof (struct mptsas_cmd));
3665 			pkt->pkt_ha_private = (opaque_t)cmd;
3666 			pkt->pkt_address = *ap;
3667 			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3668 			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3669 			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3670 			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3671 			cmd->cmd_cdblen = (uchar_t)cmdlen;
3672 			cmd->cmd_scblen = statuslen;
3673 			cmd->cmd_rqslen = SENSE_LENGTH;
3674 			cmd->cmd_tgt_addr = ptgt;
3675 			failure = 0;
3676 		}
3677 
3678 		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3679 		    (tgtlen > PKT_PRIV_LEN) ||
3680 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
3681 			if (failure == 0) {
3682 				/*
3683 				 * if extern alloc fails, all will be
3684 				 * deallocated, including cmd
3685 				 */
3686 				failure = mptsas_pkt_alloc_extern(mpt, cmd,
3687 				    cmdlen, tgtlen, statuslen, kf);
3688 			}
3689 			if (failure) {
3690 				/*
3691 				 * if extern allocation fails, it will
3692 				 * deallocate the new pkt as well
3693 				 */
3694 				return (NULL);
3695 			}
3696 		}
3697 		new_cmd = cmd;
3698 
3699 	} else {
3700 		cmd = PKT2CMD(pkt);
3701 		new_cmd = NULL;
3702 	}
3703 
3704 
3705 	/* grab cmd->cmd_cookiec here as oldcookiec */
3706 
3707 	oldcookiec = cmd->cmd_cookiec;
3708 
3709 	/*
3710 	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3711 	 * greater than 0 and we'll need to grab the next dma window
3712 	 */
3713 	/*
3714 	 * SLM-not doing extra command frame right now; may add later
3715 	 */
3716 
3717 	if (cmd->cmd_nwin > 0) {
3718 
3719 		/*
3720 		 * Make sure we haven't gone past the total number
3721 		 * of windows
3722 		 */
3723 		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3724 			return (NULL);
3725 		}
3726 		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3727 		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3728 		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3729 			return (NULL);
3730 		}
3731 		goto get_dma_cookies;
3732 	}
3733 
3734 
3735 	if (flags & PKT_XARQ) {
3736 		cmd->cmd_flags |= CFLAG_XARQ;
3737 	}
3738 
3739 	/*
3740 	 * DMA resource allocation.  This version assumes your
3741 	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3742 	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3743 	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3744 	 */
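	/*
	 * Illustrative mapping (derived from the code below): a B_READ buf
	 * with PKT_CONSISTENT set ends up with dma_flags ==
	 * (DDI_DMA_READ | DDI_DMA_CONSISTENT); a write without
	 * PKT_CONSISTENT uses plain DDI_DMA_WRITE; and PKT_DMA_PARTIAL
	 * additionally ORs in DDI_DMA_PARTIAL.
	 */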
3745 	if (bp && (bp->b_bcount != 0) &&
3746 	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3747 
3748 		int	cnt, dma_flags;
3749 		mptti_t	*dmap;		/* ptr to the S/G list */
3750 
3751 		/*
3752 		 * Set up DMA memory and position to the next DMA segment.
3753 		 */
3754 		ASSERT(cmd->cmd_dmahandle != NULL);
3755 
3756 		if (bp->b_flags & B_READ) {
3757 			dma_flags = DDI_DMA_READ;
3758 			cmd->cmd_flags &= ~CFLAG_DMASEND;
3759 		} else {
3760 			dma_flags = DDI_DMA_WRITE;
3761 			cmd->cmd_flags |= CFLAG_DMASEND;
3762 		}
3763 		if (flags & PKT_CONSISTENT) {
3764 			cmd->cmd_flags |= CFLAG_CMDIOPB;
3765 			dma_flags |= DDI_DMA_CONSISTENT;
3766 		}
3767 
3768 		if (flags & PKT_DMA_PARTIAL) {
3769 			dma_flags |= DDI_DMA_PARTIAL;
3770 		}
3771 
3772 		/*
3773 		 * workaround for byte hole issue on psycho and
3774 		 * schizo pre 2.1
3775 		 */
3776 		if ((bp->b_flags & B_READ) && ((bp->b_flags &
3777 		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3778 		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3779 			dma_flags |= DDI_DMA_CONSISTENT;
3780 		}
3781 
3782 		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3783 		    dma_flags, callback, arg,
3784 		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
3785 		if (rval == DDI_DMA_PARTIAL_MAP) {
3786 			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
3787 			    &cmd->cmd_nwin);
3788 			cmd->cmd_winindex = 0;
3789 			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
3790 			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
3791 			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
3792 			    &cmd->cmd_cookiec);
3793 		} else if (rval && (rval != DDI_DMA_MAPPED)) {
3794 			switch (rval) {
3795 			case DDI_DMA_NORESOURCES:
3796 				bioerror(bp, 0);
3797 				break;
3798 			case DDI_DMA_BADATTR:
3799 			case DDI_DMA_NOMAPPING:
3800 				bioerror(bp, EFAULT);
3801 				break;
3802 			case DDI_DMA_TOOBIG:
3803 			default:
3804 				bioerror(bp, EINVAL);
3805 				break;
3806 			}
3807 			cmd->cmd_flags &= ~CFLAG_DMAVALID;
3808 			if (new_cmd) {
3809 				mptsas_scsi_destroy_pkt(ap, pkt);
3810 			}
3811 			return ((struct scsi_pkt *)NULL);
3812 		}
3813 
3814 get_dma_cookies:
3815 		cmd->cmd_flags |= CFLAG_DMAVALID;
3816 		ASSERT(cmd->cmd_cookiec > 0);
3817 
3818 		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3819 			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3820 			    cmd->cmd_cookiec);
3821 			bioerror(bp, EINVAL);
3822 			if (new_cmd) {
3823 				mptsas_scsi_destroy_pkt(ap, pkt);
3824 			}
3825 			return ((struct scsi_pkt *)NULL);
3826 		}
3827 
3828 		/*
3829 		 * Allocate extra SGL buffer if needed.
3830 		 */
3831 		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3832 		    (cmd->cmd_extra_frames == NULL)) {
3833 			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3834 			    DDI_FAILURE) {
3835 				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3836 				    "failed");
3837 				bioerror(bp, ENOMEM);
3838 				if (new_cmd) {
3839 					mptsas_scsi_destroy_pkt(ap, pkt);
3840 				}
3841 				return ((struct scsi_pkt *)NULL);
3842 			}
3843 		}
3844 
3845 		/*
3846 		 * Always use scatter-gather transfer
3847 		 * Use the loop below to store physical addresses of
3848 		 * DMA segments, from the DMA cookies, into your HBA's
3849 		 * scatter-gather list.
3850 		 * We need to ensure we have enough kmem alloc'd
3851 		 * for the sg entries since we are no longer using an
3852 		 * array inside mptsas_cmd_t.
3853 		 *
3854 		 * We check cmd->cmd_cookiec against oldcookiec so
3855 		 * the scatter-gather list is correctly allocated
3856 		 */
3857 
3858 		if (oldcookiec != cmd->cmd_cookiec) {
3859 			if (cmd->cmd_sg != (mptti_t *)NULL) {
3860 				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3861 				    oldcookiec);
3862 				cmd->cmd_sg = NULL;
3863 			}
3864 		}
3865 
3866 		if (cmd->cmd_sg == (mptti_t *)NULL) {
3867 			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3868 			    cmd->cmd_cookiec), kf);
3869 
3870 			if (cmd->cmd_sg == (mptti_t *)NULL) {
3871 				mptsas_log(mpt, CE_WARN,
3872 				    "unable to kmem_alloc enough memory "
3873 				    "for scatter/gather list");
3874 				/*
3875 				 * If we have an ENOMEM condition we need to behave
3876 				 * the same way as the rest of this routine.
3877 				 */
3878 
3879 				bioerror(bp, ENOMEM);
3880 				if (new_cmd) {
3881 					mptsas_scsi_destroy_pkt(ap, pkt);
3882 				}
3883 				return ((struct scsi_pkt *)NULL);
3884 			}
3885 		}
3886 
3887 		dmap = cmd->cmd_sg;
3888 
3889 		ASSERT(cmd->cmd_cookie.dmac_size != 0);
3890 
3891 		/*
3892 		 * store the first segment into the S/G list
3893 		 */
3894 		dmap->count = cmd->cmd_cookie.dmac_size;
3895 		dmap->addr.address64.Low = (uint32_t)
3896 		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3897 		dmap->addr.address64.High = (uint32_t)
3898 		    (cmd->cmd_cookie.dmac_laddress >> 32);
3899 
3900 		/*
3901 		 * dmacount counts the size of the dma for this window
3902 		 * (if partial dma is being used).  totaldmacount
3903 		 * keeps track of the total amount of dma we have
3904 		 * transferred for all the windows (needed to calculate
3905 		 * the resid value below).
3906 		 */
3907 		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3908 		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3909 
3910 		/*
3911 		 * We already stored the first DMA scatter gather segment,
3912 		 * start at 1 if we need to store more.
3913 		 */
3914 		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3915 			/*
3916 			 * Get next DMA cookie
3917 			 */
3918 			ddi_dma_nextcookie(cmd->cmd_dmahandle,
3919 			    &cmd->cmd_cookie);
3920 			dmap++;
3921 
3922 			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3923 			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3924 
3925 			/*
3926 			 * store the segment parms into the S/G list
3927 			 */
3928 			dmap->count = cmd->cmd_cookie.dmac_size;
3929 			dmap->addr.address64.Low = (uint32_t)
3930 			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3931 			dmap->addr.address64.High = (uint32_t)
3932 			    (cmd->cmd_cookie.dmac_laddress >> 32);
3933 		}
3934 
3935 		/*
3936 		 * If this was partially allocated, set the resid to the
3937 		 * amount of data NOT transferred in this window.
3938 		 * If there is only one window, the resid will be 0.
3939 		 */
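		/*
		 * Worked example (hypothetical sizes): for a 1MB b_bcount
		 * where the first DMA window only covers 640KB,
		 * cmd_totaldmacount is 640KB after this window and
		 * pkt_resid is set to the remaining 384KB below.
		 */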
3940 		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3941 		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3942 	}
3943 	return (pkt);
3944 }
3945 
3946 /*
3947  * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
3948  *
3949  * Notes:
3950  *	- also frees DMA resources if allocated
3951  *	- implicit DMA synchronization
3952  */
3953 static void
3954 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3955 {
3956 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3957 	mptsas_t	*mpt = ADDR2MPT(ap);
3958 
3959 	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3960 	    ap->a_target, (void *)pkt));
3961 
3962 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3963 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3964 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
3965 	}
3966 
3967 	if (cmd->cmd_sg) {
3968 		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3969 		cmd->cmd_sg = NULL;
3970 	}
3971 
3972 	mptsas_free_extra_sgl_frame(mpt, cmd);
3973 
3974 	if ((cmd->cmd_flags &
3975 	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3976 	    CFLAG_SCBEXTERN)) == 0) {
3977 		cmd->cmd_flags = CFLAG_FREE;
3978 		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3979 	} else {
3980 		mptsas_pkt_destroy_extern(mpt, cmd);
3981 	}
3982 }
3983 
3984 /*
3985  * kmem cache constructor and destructor:
3986  * When constructing, we allocate the per-command DMA handle and ARQ
3987  * resources; when destructing, we free them again.
3988  */
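/*
 * Hedged sketch (illustrative only, not the driver's actual call site):
 * a cache using these callbacks would be created elsewhere roughly as
 *
 *	mpt->m_kmem_cache = kmem_cache_create("mptsas_cache",
 *	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
 *	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
 *	    NULL, (void *)mpt, NULL, 0);
 *
 * so every cmd handed out by kmem_cache_alloc() already carries the DMA
 * and ARQ resources set up below.  The cache name and alignment shown
 * here are assumptions.
 */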
3989 static int
3990 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3991 {
3992 	mptsas_cmd_t		*cmd = buf;
3993 	mptsas_t		*mpt  = cdrarg;
3994 	struct scsi_address	ap;
3995 	uint_t			cookiec;
3996 	ddi_dma_attr_t		arq_dma_attr;
3997 	int			(*callback)(caddr_t);
3998 
3999 	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4000 
4001 	NDBG4(("mptsas_kmem_cache_constructor"));
4002 
4003 	ap.a_hba_tran = mpt->m_tran;
4004 	ap.a_target = 0;
4005 	ap.a_lun = 0;
4006 
4007 	/*
4008 	 * allocate a dma handle
4009 	 */
4010 	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4011 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4012 		cmd->cmd_dmahandle = NULL;
4013 		return (-1);
4014 	}
4015 
4016 	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
4017 	    SENSE_LENGTH, B_READ, callback, NULL);
4018 	if (cmd->cmd_arq_buf == NULL) {
4019 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4020 		cmd->cmd_dmahandle = NULL;
4021 		return (-1);
4022 	}
4023 
4024 	/*
4025 	 * allocate an arq handle
4026 	 */
4027 	arq_dma_attr = mpt->m_msg_dma_attr;
4028 	arq_dma_attr.dma_attr_sgllen = 1;
4029 	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
4030 	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
4031 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4032 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
4033 		cmd->cmd_dmahandle = NULL;
4034 		cmd->cmd_arqhandle = NULL;
4035 		return (-1);
4036 	}
4037 
4038 	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
4039 	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4040 	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
4041 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4042 		ddi_dma_free_handle(&cmd->cmd_arqhandle);
4043 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
4044 		cmd->cmd_dmahandle = NULL;
4045 		cmd->cmd_arqhandle = NULL;
4046 		cmd->cmd_arq_buf = NULL;
4047 		return (-1);
4048 	}
4049 
4050 	return (0);
4051 }
4052 
4053 static void
4054 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4055 {
4056 #ifndef __lock_lint
4057 	_NOTE(ARGUNUSED(cdrarg))
4058 #endif
4059 	mptsas_cmd_t	*cmd = buf;
4060 
4061 	NDBG4(("mptsas_kmem_cache_destructor"));
4062 
4063 	if (cmd->cmd_arqhandle) {
4064 		(void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
4065 		ddi_dma_free_handle(&cmd->cmd_arqhandle);
4066 		cmd->cmd_arqhandle = NULL;
4067 	}
4068 	if (cmd->cmd_arq_buf) {
4069 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
4070 		cmd->cmd_arq_buf = NULL;
4071 	}
4072 	if (cmd->cmd_dmahandle) {
4073 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4074 		cmd->cmd_dmahandle = NULL;
4075 	}
4076 }
4077 
4078 static int
4079 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4080 {
4081 	mptsas_cache_frames_t	*p = buf;
4082 	mptsas_t		*mpt = cdrarg;
4083 	ddi_dma_attr_t		frame_dma_attr;
4084 	size_t			mem_size, alloc_len;
4085 	ddi_dma_cookie_t	cookie;
4086 	uint_t			ncookie;
4087 	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4088 	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4089 
4090 	frame_dma_attr = mpt->m_msg_dma_attr;
4091 	frame_dma_attr.dma_attr_align = 0x10;
4092 	frame_dma_attr.dma_attr_sgllen = 1;
4093 
4094 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4095 	    &p->m_dma_hdl) != DDI_SUCCESS) {
4096 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4097 		    " extra SGL.");
4098 		return (DDI_FAILURE);
4099 	}
4100 
4101 	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4102 
4103 	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4104 	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4105 	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4106 		ddi_dma_free_handle(&p->m_dma_hdl);
4107 		p->m_dma_hdl = NULL;
4108 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4109 		    " extra SGL.");
4110 		return (DDI_FAILURE);
4111 	}
4112 
4113 	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4114 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4115 	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
4116 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4117 		ddi_dma_free_handle(&p->m_dma_hdl);
4118 		p->m_dma_hdl = NULL;
4119 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4120 		    " extra SGL");
4121 		return (DDI_FAILURE);
4122 	}
4123 
4124 	/*
4125 	 * Store the physical address of the SGL memory.  The chip uses this
4126 	 * address to DMA the extra SGL frames to and from the driver, while
4127 	 * m_frames_addr is the address mpt uses to fill in the SGL.
4128 	 */
4129 	p->m_phys_addr = cookie.dmac_address;
4130 
4131 	return (DDI_SUCCESS);
4132 }
4133 
4134 static void
4135 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4136 {
4137 #ifndef __lock_lint
4138 	_NOTE(ARGUNUSED(cdrarg))
4139 #endif
4140 	mptsas_cache_frames_t	*p = buf;
4141 	if (p->m_dma_hdl != NULL) {
4142 		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
4143 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4144 		ddi_dma_free_handle(&p->m_dma_hdl);
4145 		p->m_phys_addr = NULL;
4146 		p->m_frames_addr = NULL;
4147 		p->m_dma_hdl = NULL;
4148 		p->m_acc_hdl = NULL;
4149 	}
4150 
4151 }
4152 
4153 /*
4154  * Allocate and deallocate external pkt space (i.e. not part of mptsas_cmd)
4155  * for a non-standard length cdb, pkt_private and status areas.
4156  * If allocation fails, deallocate all external space and the pkt.
4157  */
4158 /* ARGSUSED */
4159 static int
4160 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4161     int cmdlen, int tgtlen, int statuslen, int kf)
4162 {
4163 	caddr_t			cdbp, scbp, tgt;
4164 	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
4165 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
4166 	struct scsi_address	ap;
4167 	size_t			senselength;
4168 	ddi_dma_attr_t		ext_arq_dma_attr;
4169 	uint_t			cookiec;
4170 
4171 	NDBG3(("mptsas_pkt_alloc_extern: "
4172 	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4173 	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4174 
4175 	tgt = cdbp = scbp = NULL;
4176 	cmd->cmd_scblen		= statuslen;
4177 	cmd->cmd_privlen	= (uchar_t)tgtlen;
4178 
4179 	if (cmdlen > sizeof (cmd->cmd_cdb)) {
4180 		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4181 			goto fail;
4182 		}
4183 		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4184 		cmd->cmd_flags |= CFLAG_CDBEXTERN;
4185 	}
4186 	if (tgtlen > PKT_PRIV_LEN) {
4187 		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4188 			goto fail;
4189 		}
4190 		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4191 		cmd->cmd_pkt->pkt_private = tgt;
4192 	}
4193 	if (statuslen > EXTCMDS_STATUS_SIZE) {
4194 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4195 			goto fail;
4196 		}
4197 		cmd->cmd_flags |= CFLAG_SCBEXTERN;
4198 		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4199 
4200 		/* allocate sense data buf for DMA */
4201 
4202 		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
4203 		    struct scsi_arq_status, sts_sensedata);
4204 		cmd->cmd_rqslen = (uchar_t)senselength;
4205 
4206 		ap.a_hba_tran = mpt->m_tran;
4207 		ap.a_target = 0;
4208 		ap.a_lun = 0;
4209 
4210 		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
4211 		    (struct buf *)NULL, senselength, B_READ,
4212 		    callback, NULL);
4213 
4214 		if (cmd->cmd_ext_arq_buf == NULL) {
4215 			goto fail;
4216 		}
4217 		/*
4218 		 * allocate an external arq handle and bind the buf
4219 		 */
4220 		ext_arq_dma_attr = mpt->m_msg_dma_attr;
4221 		ext_arq_dma_attr.dma_attr_sgllen = 1;
4222 		if ((ddi_dma_alloc_handle(mpt->m_dip,
4223 		    &ext_arq_dma_attr, callback,
4224 		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
4225 			goto fail;
4226 		}
4227 
4228 		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
4229 		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4230 		    callback, NULL, &cmd->cmd_ext_arqcookie,
4231 		    &cookiec)
4232 		    != DDI_SUCCESS) {
4233 			goto fail;
4234 		}
4235 		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4236 	}
4237 	return (0);
4238 fail:
4239 	mptsas_pkt_destroy_extern(mpt, cmd);
4240 	return (1);
4241 }
4242 
4243 /*
4244  * deallocate external pkt space and deallocate the pkt
4245  */
4246 static void
4247 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4248 {
4249 	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4250 
4251 	if (cmd->cmd_flags & CFLAG_FREE) {
4252 		mptsas_log(mpt, CE_PANIC,
4253 		    "mptsas_pkt_destroy_extern: freeing free packet");
4254 		_NOTE(NOT_REACHED)
4255 		/* NOTREACHED */
4256 	}
4257 	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4258 		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4259 	}
4260 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4261 		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4262 		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4263 			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4264 		}
4265 		if (cmd->cmd_ext_arqhandle) {
4266 			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4267 			cmd->cmd_ext_arqhandle = NULL;
4268 		}
4269 		if (cmd->cmd_ext_arq_buf)
4270 			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4271 	}
4272 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4273 		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4274 	}
4275 	cmd->cmd_flags = CFLAG_FREE;
4276 	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4277 }
4278 
4279 /*
4280  * tran_sync_pkt(9E) - explicit DMA synchronization
4281  */
4282 /*ARGSUSED*/
4283 static void
4284 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4285 {
4286 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4287 
4288 	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4289 	    ap->a_target, (void *)pkt));
4290 
4291 	if (cmd->cmd_dmahandle) {
4292 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4293 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
4294 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4295 	}
4296 }
4297 
4298 /*
4299  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4300  */
4301 /*ARGSUSED*/
4302 static void
4303 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4304 {
4305 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4306 	mptsas_t	*mpt = ADDR2MPT(ap);
4307 
4308 	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4309 	    ap->a_target, (void *)pkt));
4310 
4311 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4312 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4313 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4314 	}
4315 
4316 	if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4317 		(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4318 		cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4319 	}
4320 
4321 	mptsas_free_extra_sgl_frame(mpt, cmd);
4322 }
4323 
4324 static void
4325 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4326 {
4327 	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4328 	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4329 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4330 		    DDI_DMA_SYNC_FORCPU);
4331 	}
4332 	(*pkt->pkt_comp)(pkt);
4333 }
4334 
4335 static void
4336 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4337 	pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4338 {
4339 	uint_t			cookiec;
4340 	mptti_t			*dmap;
4341 	uint32_t		flags;
4342 	pMpi2SGESimple64_t	sge;
4343 	pMpi2SGEChain64_t	sgechain;
4344 	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4345 
4346 	/*
4347 	 * Save the number of entries in the DMA
4348 	 * Scatter/Gather list
4349 	 */
4350 	cookiec = cmd->cmd_cookiec;
4351 
4352 	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4353 
4354 	/*
4355 	 * Set read/write bit in control.
4356 	 */
4357 	if (cmd->cmd_flags & CFLAG_DMASEND) {
4358 		*control |= MPI2_SCSIIO_CONTROL_WRITE;
4359 	} else {
4360 		*control |= MPI2_SCSIIO_CONTROL_READ;
4361 	}
4362 
4363 	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4364 
4365 	/*
4366 	 * We have 2 cases here.  The first is where we can fit all the
4367 	 * SG elements into the main frame, and the second is the case
4368 	 * where we can't.
4369 	 * If we have more cookies than we can attach to a frame
4370 	 * we will need to use a chain element to point to
4371 	 * a location in memory where the rest of the S/G
4372 	 * elements reside.
4373 	 */
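	/*
	 * Illustrative note: each SGE's FlagsLength word is built in two
	 * steps below; the byte count is written first and the flag bits
	 * are then ORed in above MPI2_SGE_FLAGS_SHIFT, so the length
	 * occupies the low bits and the flags the high bits of the same
	 * 32-bit word.
	 */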
4374 	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4375 		dmap = cmd->cmd_sg;
4376 		sge = (pMpi2SGESimple64_t)(&frame->SGL);
4377 		while (cookiec--) {
4378 			ddi_put32(acc_hdl,
4379 			    &sge->Address.Low, dmap->addr.address64.Low);
4380 			ddi_put32(acc_hdl,
4381 			    &sge->Address.High, dmap->addr.address64.High);
4382 			ddi_put32(acc_hdl, &sge->FlagsLength,
4383 			    dmap->count);
4384 			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4385 			flags |= ((uint32_t)
4386 			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4387 			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4388 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4389 			    MPI2_SGE_FLAGS_SHIFT);
4390 
4391 			/*
4392 			 * If this is the last cookie, we set the flags
4393 			 * to indicate so
4394 			 */
4395 			if (cookiec == 0) {
4396 				flags |=
4397 				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4398 				    | MPI2_SGE_FLAGS_END_OF_BUFFER
4399 				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
4400 				    MPI2_SGE_FLAGS_SHIFT);
4401 			}
4402 			if (cmd->cmd_flags & CFLAG_DMASEND) {
4403 				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4404 				    MPI2_SGE_FLAGS_SHIFT);
4405 			} else {
4406 				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4407 				    MPI2_SGE_FLAGS_SHIFT);
4408 			}
4409 			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4410 			dmap++;
4411 			sge++;
4412 		}
4413 	} else {
4414 		/*
4415 		 * Here we start to deal with multiple frames.
4416 		 * The process is as follows:
4417 		 * 1. Determine how many frames are needed for SGL element
4418 		 *    storage; Note that all frames are stored in contiguous
4419 		 *    memory space and in 64-bit DMA mode each element is
4420 		 *    3 double-words (12 bytes) long.
4421 		 * 2. Fill up the main frame. We need to do this separately
4422 		 *    since it contains the SCSI IO request header and needs
4423 		 *    dedicated processing. Note that the last 4 double-words
4424 		 *    of the SCSI IO header are for SGL element storage
4425 		 *    (MPI2_SGE_IO_UNION).
4426 		 * 3. Fill the chain element in the main frame, so the DMA
4427 		 *    engine can use the following frames.
4428 		 * 4. Enter a loop to fill the remaining frames. Note that the
4429 		 *    last frame contains no chain element.  The remaining
4430 		 *    frames go into the mpt SGL buffer allocated on the fly,
4431 		 *    not immediately following the main message frame, as in
4432 		 *    Gen1.
4433 		 * Some restrictions:
4434 		 * 1. For 64-bit DMA, the simple element and chain element
4435 		 *    are both 3 double-words (12 bytes) in size, even
4436 		 *    though all frames are stored in the first 4G of mem
4437 		 *    range and the higher 32-bits of the address are always 0.
4438 		 * 2. On some controllers (like the 1064/1068), a frame can
4439 		 *    hold SGL elements with the last 1 or 2 double-words
4440 		 *    (4 or 8 bytes) unused. On these controllers, we should
4441 		 *    recognize that there's not enough room for another SGL
4442 		 *    element and move the sge pointer to the next frame.
4443 		 */
4444 		int		i, j, k, l, frames, sgemax;
4445 		int		temp;
4446 		uint8_t		chainflags;
4447 		uint16_t	chainlength;
4448 		mptsas_cache_frames_t *p;
4449 
4450 		/*
4451 		 * Sgemax is the number of SGEs that will fit in
4452 		 * each extra frame and frames is the total
4453 		 * number of frames we'll need.  One SGE entry per
4454 		 * frame is reserved for the chain element, thus the -1 below.
4455 		 */
4456 		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4457 		    - 1);
4458 		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4459 
4460 		/*
4461 		 * A little check to see if we need to round up the number
4462 		 * of frames we need
4463 		 */
4464 		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4465 		    sgemax) > 1) {
4466 			frames = (temp + 1);
4467 		} else {
4468 			frames = temp;
4469 		}
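		/*
		 * Worked example with purely illustrative numbers: if
		 * MPTSAS_MAX_FRAME_SGES64() were 10 (9 data SGEs plus the
		 * chain element in the main frame) and sgemax were 9, then
		 * for cookiec == 30: temp = (30 - 9) / 9 = 2, the remainder
		 * 21 - 18 = 3 exceeds 1, so frames rounds up to 3.
		 */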
4470 		dmap = cmd->cmd_sg;
4471 		sge = (pMpi2SGESimple64_t)(&frame->SGL);
4472 
4473 		/*
4474 		 * First fill in the main frame
4475 		 */
4476 		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4477 			ddi_put32(acc_hdl, &sge->Address.Low,
4478 			    dmap->addr.address64.Low);
4479 			ddi_put32(acc_hdl, &sge->Address.High,
4480 			    dmap->addr.address64.High);
4481 			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4482 			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4483 			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4484 			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4485 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4486 			    MPI2_SGE_FLAGS_SHIFT);
4487 
4488 			/*
4489 			 * If this is the last SGE of this frame
4490 			 * we set the end of list flag
4491 			 */
4492 			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4493 				flags |= ((uint32_t)
4494 				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4495 				    MPI2_SGE_FLAGS_SHIFT);
4496 			}
4497 			if (cmd->cmd_flags & CFLAG_DMASEND) {
4498 				flags |=
4499 				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4500 				    MPI2_SGE_FLAGS_SHIFT);
4501 			} else {
4502 				flags |=
4503 				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4504 				    MPI2_SGE_FLAGS_SHIFT);
4505 			}
4506 			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4507 			dmap++;
4508 			sge++;
4509 		}
4510 
4511 		/*
4512 		 * Fill in the chain element in the main frame.
4513 		 * About calculation on ChainOffset:
4514 		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4515 		 *    in the end reserved for SGL element storage
4516 		 *    (MPI2_SGE_IO_UNION); we should count it in our
4517 		 *    calculation.  See its definition in the header file.
4518 		 * 2. The variable j is the counter of the current SGL element
4519 		 *    that will be processed, and (j - 1) is the number of
4520 		 *    SGL elements that have been processed (stored in the
4521 		 *    main frame).
4522 		 * 3. ChainOffset value should be in units of double-words (4
4523 		 *    bytes) so the last value should be divided by 4.
4524 		 */
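		/*
		 * Worked example with hypothetical structure sizes (not the
		 * actual MPI2 sizes): if the SCSI IO request were 192 bytes,
		 * the trailing SGL union 16 bytes, and 9 SGEs of 12 bytes
		 * each (j - 1 == 9) had been stored, ChainOffset would be
		 * (192 - 16 + 108) >> 2 = 71 double-words.
		 */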
4525 		ddi_put8(acc_hdl, &frame->ChainOffset,
4526 		    (sizeof (MPI2_SCSI_IO_REQUEST) -
4527 		    sizeof (MPI2_SGE_IO_UNION) +
4528 		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4529 		sgechain = (pMpi2SGEChain64_t)sge;
4530 		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4531 		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4532 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4533 		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4534 
4535 		/*
4536 		 * The size of the next frame is the exact amount of space
4537 		 * (in bytes) used to store the SGL elements. j is the counter
4538 		 * of SGL elements. (j - 1) is the number of SGL elements that
4539 		 * have been processed (stored in frames).
4540 		 */
4541 		if (frames >= 2) {
4542 			chainlength = mpt->m_req_frame_size /
4543 			    sizeof (MPI2_SGE_SIMPLE64) *
4544 			    sizeof (MPI2_SGE_SIMPLE64);
4545 		} else {
4546 			chainlength = ((cookiec - (j - 1)) *
4547 			    sizeof (MPI2_SGE_SIMPLE64));
4548 		}
4549 
4550 		p = cmd->cmd_extra_frames;
4551 
4552 		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4553 		ddi_put32(acc_hdl, &sgechain->Address.Low,
4554 		    p->m_phys_addr);
4555 		/* SGL is allocated in the first 4G mem range */
4556 		ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4557 
4558 		/*
4559 		 * If there are more than 2 frames left we have to
4560 		 * fill in the next chain offset to the location of
4561 		 * the chain element in the next frame.
4562 		 * sgemax is the number of simple elements in an extra
4563 		 * frame. Note that the value NextChainOffset should be
4564 		 * in double-words (4 bytes).
4565 		 */
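		/*
		 * For instance (derived from the expression below): since a
		 * simple element is 12 bytes, NextChainOffset becomes
		 * sgemax * 12 / 4 = 3 * sgemax double-words when more
		 * frames follow.
		 */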
4566 		if (frames >= 2) {
4567 			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4568 			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4569 		} else {
4570 			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4571 		}
4572 
4573 		/*
4574 		 * Jump to next frame;
4575 		 * Starting here, chain buffers go into the per command SGL.
4576 		 * This buffer is allocated when chain buffers are needed.
4577 		 */
4578 		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4579 		i = cookiec;
4580 
4581 		/*
4582 		 * Start filling in frames with SGE's.  If we
4583 		 * reach the end of frame and still have SGE's
4584 		 * to fill we need to add a chain element and
4585 		 * use another frame.  j will be our counter
4586 		 * for what cookie we are at and i will be
4587 		 * the total cookiec. k is the current frame
4588 		 */
4589 		for (k = 1; k <= frames; k++) {
4590 			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4591 
4592 				/*
4593 				 * If we have reached the end of frame
4594 				 * and we have more SGE's to fill in
4595 				 * we have to fill the final entry
4596 				 * with a chain element and then
4597 				 * continue to the next frame
4598 				 */
4599 				if ((l == (sgemax + 1)) && (k != frames)) {
4600 					sgechain = (pMpi2SGEChain64_t)sge;
4601 					j--;
4602 					chainflags = (
4603 					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4604 					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4605 					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4606 					ddi_put8(p->m_acc_hdl,
4607 					    &sgechain->Flags, chainflags);
4608 					/*
4609 					 * k is the frame counter and (k + 1)
4610 					 * is the number of the next frame.
4611 					 * Note that frames are in contiguous
4612 					 * memory space.
4613 					 */
4614 					ddi_put32(p->m_acc_hdl,
4615 					    &sgechain->Address.Low,
4616 					    (p->m_phys_addr +
4617 					    (mpt->m_req_frame_size * k)));
4618 					ddi_put32(p->m_acc_hdl,
4619 					    &sgechain->Address.High, 0);
4620 
4621 					/*
4622 					 * If there are more than 2 frames left
4623 					 * we have to set the next chain offset to
4624 					 * the location of the chain element
4625 					 * in the next frame and fill in the
4626 					 * length of the next chain
4627 					 */
4628 					if ((frames - k) >= 2) {
4629 						ddi_put8(p->m_acc_hdl,
4630 						    &sgechain->NextChainOffset,
4631 						    (sgemax *
4632 						    sizeof (MPI2_SGE_SIMPLE64))
4633 						    >> 2);
4634 						ddi_put16(p->m_acc_hdl,
4635 						    &sgechain->Length,
4636 						    mpt->m_req_frame_size /
4637 						    sizeof (MPI2_SGE_SIMPLE64) *
4638 						    sizeof (MPI2_SGE_SIMPLE64));
4639 					} else {
4640 						/*
4641 						 * This is the last frame. Set
4642 						 * the NextChainOffset to 0 and
4643 						 * Length is the total size of
4644 						 * all remaining simple elements
4645 						 */
4646 						ddi_put8(p->m_acc_hdl,
4647 						    &sgechain->NextChainOffset,
4648 						    0);
4649 						ddi_put16(p->m_acc_hdl,
4650 						    &sgechain->Length,
4651 						    (cookiec - j) *
4652 						    sizeof (MPI2_SGE_SIMPLE64));
4653 					}
4654 
4655 					/* Jump to the next frame */
4656 					sge = (pMpi2SGESimple64_t)
4657 					    ((char *)p->m_frames_addr +
4658 					    (int)mpt->m_req_frame_size * k);
4659 
4660 					continue;
4661 				}
4662 
4663 				ddi_put32(p->m_acc_hdl,
4664 				    &sge->Address.Low,
4665 				    dmap->addr.address64.Low);
4666 				ddi_put32(p->m_acc_hdl,
4667 				    &sge->Address.High,
4668 				    dmap->addr.address64.High);
4669 				ddi_put32(p->m_acc_hdl,
4670 				    &sge->FlagsLength, dmap->count);
4671 				flags = ddi_get32(p->m_acc_hdl,
4672 				    &sge->FlagsLength);
4673 				flags |= ((uint32_t)(
4674 				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4675 				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4676 				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4677 				    MPI2_SGE_FLAGS_SHIFT);
4678 
4679 				/*
4680 				 * If we are at the end of the frame and
4681 				 * there is another frame to fill in
4682 				 * we set the last simple element as last
4683 				 * element
4684 				 */
4685 				if ((l == sgemax) && (k != frames)) {
4686 					flags |= ((uint32_t)
4687 					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4688 					    MPI2_SGE_FLAGS_SHIFT);
4689 				}
4690 
4691 				/*
4692 				 * If this is the final cookie we
4693 				 * indicate it by setting the flags
4694 				 */
4695 				if (j == i) {
4696 					flags |= ((uint32_t)
4697 					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
4698 					    MPI2_SGE_FLAGS_END_OF_BUFFER |
4699 					    MPI2_SGE_FLAGS_END_OF_LIST) <<
4700 					    MPI2_SGE_FLAGS_SHIFT);
4701 				}
4702 				if (cmd->cmd_flags & CFLAG_DMASEND) {
4703 					flags |=
4704 					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4705 					    MPI2_SGE_FLAGS_SHIFT);
4706 				} else {
4707 					flags |=
4708 					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4709 					    MPI2_SGE_FLAGS_SHIFT);
4710 				}
4711 				ddi_put32(p->m_acc_hdl,
4712 				    &sge->FlagsLength, flags);
4713 				dmap++;
4714 				sge++;
4715 			}
4716 		}
4717 
4718 		/*
4719 		 * Sync DMA with the chain buffers that were just created
4720 		 */
4721 		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4722 	}
4723 }
4724 
4725 /*
4726  * Interrupt handling
4727  * Utility routine.  Poll for status of a command sent to HBA
4728  * without interrupts (a FLAG_NOINTR command).
4729  */
4730 int
4731 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4732 {
4733 	int	rval = TRUE;
4734 
4735 	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4736 
4737 	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4738 		mptsas_restart_hba(mpt);
4739 	}
4740 
4741 	/*
4742 	 * Wait, using drv_usecwait(), long enough for the command to
4743 	 * reasonably return from the target if the target isn't
4744 	 * "dead".  A polled command may well be sent from scsi_poll, and
4745 	 * there are retries built in to scsi_poll if the transport
4746 	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
4747 	 * and retries the transport up to scsi_poll_busycnt times
4748 	 * (currently 60) if
4749 	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4750 	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4751 	 *
4752 	 * limit the waiting to avoid a hang in the event that the
4753 	 * cmd never gets started but we are still receiving interrupts
4754 	 */
4755 	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4756 		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4757 			NDBG5(("mptsas_poll: command incomplete"));
4758 			rval = FALSE;
4759 			break;
4760 		}
4761 	}
4762 
4763 	if (rval == FALSE) {
4764 
4765 		/*
4766 		 * This isn't supposed to happen; the HBA must be wedged.
4767 		 * Mark this cmd as a timeout.
4768 		 */
4769 		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4770 		    (STAT_TIMEOUT|STAT_ABORTED));
4771 
4772 		if (poll_cmd->cmd_queued == FALSE) {
4773 
4774 			NDBG5(("mptsas_poll: not on waitq"));
4775 
4776 			poll_cmd->cmd_pkt->pkt_state |=
4777 			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4778 		} else {
4779 
4780 			/* find and remove it from the waitq */
4781 			NDBG5(("mptsas_poll: delete from waitq"));
4782 			mptsas_waitq_delete(mpt, poll_cmd);
4783 		}
4784 
4785 	}
4786 	mptsas_fma_check(mpt, poll_cmd);
4787 	NDBG5(("mptsas_poll: done"));
4788 	return (rval);
4789 }
4790 
4791 /*
4792  * Used for polling cmds and TM function
4793  */
4794 static int
4795 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4796 {
4797 	int				cnt;
4798 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
4799 	uint32_t			int_mask;
4800 
4801 	NDBG5(("mptsas_wait_intr"));
4802 
4803 	mpt->m_polled_intr = 1;
4804 
4805 	/*
4806 	 * Get the current interrupt mask and disable interrupts.  When
4807 	 * re-enabling ints, set mask to saved value.
4808 	 */
4809 	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4810 	MPTSAS_DISABLE_INTR(mpt);
4811 
4812 	/*
4813 	 * Keep polling for up to polltime milliseconds (1000us per pass)
4814 	 */
4815 	for (cnt = 0; cnt < polltime; cnt++) {
4816 		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4817 		    DDI_DMA_SYNC_FORCPU);
4818 
4819 		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4820 		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4821 
4822 		if (ddi_get32(mpt->m_acc_post_queue_hdl,
4823 		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4824 		    ddi_get32(mpt->m_acc_post_queue_hdl,
4825 		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4826 			drv_usecwait(1000);
4827 			continue;
4828 		}
4829 
4830 		/*
4831 		 * The reply is valid, process it according to its
4832 		 * type.
4833 		 */
4834 		mptsas_process_intr(mpt, reply_desc_union);
4835 
4836 		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4837 			mpt->m_post_index = 0;
4838 		}
4839 
4840 		/*
4841 		 * Update the global reply index
4842 		 */
4843 		ddi_put32(mpt->m_datap,
4844 		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4845 		mpt->m_polled_intr = 0;
4846 
4847 		/*
4848 		 * Re-enable interrupts and quit.
4849 		 */
4850 		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4851 		    int_mask);
4852 		return (TRUE);
4853 
4854 	}
4855 
4856 	/*
4857 	 * Clear polling flag, re-enable interrupts and quit.
4858 	 */
4859 	mpt->m_polled_intr = 0;
4860 	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4861 	return (FALSE);
4862 }
4863 
4864 static void
4865 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4866     pMpi2ReplyDescriptorsUnion_t reply_desc)
4867 {
4868 	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
4869 	uint16_t				SMID;
4870 	mptsas_slots_t				*slots = mpt->m_active;
4871 	mptsas_cmd_t				*cmd = NULL;
4872 	struct scsi_pkt				*pkt;
4873 
4874 	ASSERT(mutex_owned(&mpt->m_mutex));
4875 
4876 	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4877 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4878 
4879 	/*
4880 	 * This is a success reply so just complete the IO.  First, do a sanity
4881 	 * check on the SMID.  The final slot is used for TM requests, which
4882 	 * would not come into this reply handler.
4883 	 */
4884 	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4885 		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4886 		    SMID);
4887 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4888 		return;
4889 	}
4890 
4891 	cmd = slots->m_slot[SMID];
4892 
4893 	/*
4894 	 * print warning and return if the slot is empty
4895 	 */
4896 	if (cmd == NULL) {
4897 		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4898 		    "in slot %d", SMID);
4899 		return;
4900 	}
4901 
4902 	pkt = CMD2PKT(cmd);
4903 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4904 	    STATE_GOT_STATUS);
4905 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4906 		pkt->pkt_state |= STATE_XFERRED_DATA;
4907 	}
4908 	pkt->pkt_resid = 0;
4909 
4910 	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4911 		cmd->cmd_flags |= CFLAG_FINISHED;
4912 		cv_broadcast(&mpt->m_passthru_cv);
4913 		return;
4914 	} else {
4915 		mptsas_remove_cmd(mpt, cmd);
4916 	}
4917 
4918 	if (cmd->cmd_flags & CFLAG_RETRY) {
4919 		/*
4920 		 * The target returned QFULL or busy, do not add this
4921 		 * pkt to the doneq since the hba will retry
4922 		 * this cmd.
4923 		 *
4924 		 * The pkt has already been resubmitted in
4925 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4926 		 * Remove this cmd_flag here.
4927 		 */
4928 		cmd->cmd_flags &= ~CFLAG_RETRY;
4929 	} else {
4930 		mptsas_doneq_add(mpt, cmd);
4931 	}
4932 }
4933 
4934 static void
4935 mptsas_handle_address_reply(mptsas_t *mpt,
4936     pMpi2ReplyDescriptorsUnion_t reply_desc)
4937 {
4938 	pMpi2AddressReplyDescriptor_t	address_reply;
4939 	pMPI2DefaultReply_t		reply;
4940 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
4941 	uint32_t			reply_addr;
4942 	uint16_t			SMID, iocstatus;
4943 	mptsas_slots_t			*slots = mpt->m_active;
4944 	mptsas_cmd_t			*cmd = NULL;
4945 	uint8_t				function, buffer_type;
4946 	m_replyh_arg_t			*args;
4947 	int				reply_frame_no;
4948 
4949 	ASSERT(mutex_owned(&mpt->m_mutex));
4950 
4951 	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
4952 	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
4953 	    &address_reply->ReplyFrameAddress);
4954 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
4955 
4956 	/*
4957 	 * If reply frame is not in the proper range we should ignore this
4958 	 * message and exit the interrupt handler.
4959 	 */
4960 	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4961 	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
4962 	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
4963 	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
4964 	    mpt->m_reply_frame_size != 0)) {
4965 		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4966 		    "address 0x%x\n", reply_addr);
4967 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4968 		return;
4969 	}
4970 
4971 	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4972 	    DDI_DMA_SYNC_FORCPU);
4973 	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4974 	    mpt->m_reply_frame_dma_addr));
4975 	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
4976 
4977 	/*
4978 	 * don't get slot information and command for events since these values
4979 	 * don't exist
4980 	 */
4981 	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
4982 	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
4983 		/*
4984 		 * This could be a TM reply, which use the last allocated SMID,
4985 		 * This could be a TM reply, which uses the last allocated SMID,
4986 		 */
4987 		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
4988 			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
4989 			    "%d\n", SMID);
4990 			ddi_fm_service_impact(mpt->m_dip,
4991 			    DDI_SERVICE_UNAFFECTED);
4992 			return;
4993 		}
4994 
4995 		cmd = slots->m_slot[SMID];
4996 
4997 		/*
4998 		 * print warning and return if the slot is empty
4999 		 */
5000 		if (cmd == NULL) {
5001 			mptsas_log(mpt, CE_WARN, "?NULL command for address "
5002 			    "reply in slot %d", SMID);
5003 			return;
5004 		}
5005 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
5006 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
5007 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
5008 			cmd->cmd_rfm = reply_addr;
5009 			cmd->cmd_flags |= CFLAG_FINISHED;
5010 			cv_broadcast(&mpt->m_passthru_cv);
5011 			cv_broadcast(&mpt->m_config_cv);
5012 			cv_broadcast(&mpt->m_fw_diag_cv);
5013 			return;
5014 		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5015 			mptsas_remove_cmd(mpt, cmd);
5016 		}
5017 		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5018 	}
5019 	/*
5020 	 * Depending on the function, we need to handle
5021 	 * the reply frame (and cmd) differently.
5022 	 */
5023 	switch (function) {
5024 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
5025 		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5026 		break;
5027 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
5028 		cmd->cmd_rfm = reply_addr;
5029 		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5030 		    cmd);
5031 		break;
5032 	case MPI2_FUNCTION_FW_DOWNLOAD:
5033 		cmd->cmd_flags |= CFLAG_FINISHED;
5034 		cv_signal(&mpt->m_fw_cv);
5035 		break;
5036 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
5037 		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
5038 		    mpt->m_reply_frame_size;
5039 		args = &mpt->m_replyh_args[reply_frame_no];
5040 		args->mpt = (void *)mpt;
5041 		args->rfm = reply_addr;
5042 
5043 		/*
5044 		 * Record the event if its type is enabled in
5045 		 * this mpt instance by ioctl.
5046 		 */
5047 		mptsas_record_event(args);
5048 
5049 		/*
5050 		 * Handle time critical events
5051 		 * NOT_RESPONDING/ADDED only now
5052 		 */
5053 		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5054 			/*
5055 			 * Do not handle the ack here; let the taskq
5056 			 * work out the ack action, and the ack will be
5057 			 * sent from the taskq thread.
5058 			 */
5059 			NDBG20(("send mptsas_handle_event_sync success"));
5060 		}
5061 		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5062 		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5063 			mptsas_log(mpt, CE_WARN, "No memory available "
5064 			    "for dispatch taskq");
5065 			/*
5066 			 * Return the reply frame to the free queue.
5067 			 */
5068 			ddi_put32(mpt->m_acc_free_queue_hdl,
5069 			    &((uint32_t *)(void *)
5070 			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5071 			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5072 			    DDI_DMA_SYNC_FORDEV);
5073 			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5074 				mpt->m_free_index = 0;
5075 			}
5076 
5077 			ddi_put32(mpt->m_datap,
5078 			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5079 		}
5080 		return;
5081 	case MPI2_FUNCTION_DIAG_BUFFER_POST:
5082 		/*
5083 		 * If SMID is 0, this implies that the reply is due to a
5084 		 * release function with a status that the buffer has been
5085 		 * released.  Set the buffer flags accordingly.
5086 		 */
5087 		if (SMID == 0) {
5088 			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5089 			    &reply->IOCStatus);
5090 			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5091 			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5092 			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5093 				pBuffer =
5094 				    &mpt->m_fw_diag_buffer_list[buffer_type];
5095 				pBuffer->valid_data = TRUE;
5096 				pBuffer->owned_by_firmware = FALSE;
5097 				pBuffer->immediate = FALSE;
5098 			}
5099 		} else {
5100 			/*
5101 			 * Normal handling of diag post reply with SMID.
5102 			 */
5103 			cmd = slots->m_slot[SMID];
5104 
5105 			/*
5106 			 * print warning and return if the slot is empty
5107 			 */
5108 			if (cmd == NULL) {
5109 				mptsas_log(mpt, CE_WARN, "?NULL command for "
5110 				    "address reply in slot %d", SMID);
5111 				return;
5112 			}
5113 			cmd->cmd_rfm = reply_addr;
5114 			cmd->cmd_flags |= CFLAG_FINISHED;
5115 			cv_broadcast(&mpt->m_fw_diag_cv);
5116 		}
5117 		return;
5118 	default:
5119 		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5120 		break;
5121 	}
5122 
5123 	/*
5124 	 * Return the reply frame to the free queue.
5125 	 */
5126 	ddi_put32(mpt->m_acc_free_queue_hdl,
5127 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5128 	    reply_addr);
5129 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5130 	    DDI_DMA_SYNC_FORDEV);
5131 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5132 		mpt->m_free_index = 0;
5133 	}
5134 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5135 	    mpt->m_free_index);
5136 
5137 	if (cmd->cmd_flags & CFLAG_FW_CMD)
5138 		return;
5139 
5140 	if (cmd->cmd_flags & CFLAG_RETRY) {
5141 		/*
5142 		 * The target returned QFULL or busy, do not add this
5143 		 * pkt to the doneq since the hba will retry
5144 		 * this cmd.
5145 		 *
5146 		 * The pkt has already been resubmitted in
5147 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5148 		 * Remove this cmd_flag here.
5149 		 */
5150 		cmd->cmd_flags &= ~CFLAG_RETRY;
5151 	} else {
5152 		mptsas_doneq_add(mpt, cmd);
5153 	}
5154 }
5155 
5156 static void
5157 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5158     mptsas_cmd_t *cmd)
5159 {
5160 	uint8_t			scsi_status, scsi_state;
5161 	uint16_t		ioc_status;
5162 	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
5163 	struct scsi_pkt		*pkt;
5164 	struct scsi_arq_status	*arqstat;
5165 	struct buf		*bp;
5166 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
5167 	uint8_t			*sensedata = NULL;
5168 
5169 	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
5170 	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
5171 		bp = cmd->cmd_ext_arq_buf;
5172 	} else {
5173 		bp = cmd->cmd_arq_buf;
5174 	}
5175 
5176 	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5177 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5178 	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5179 	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5180 	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5181 	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5182 	    &reply->ResponseInfo);
5183 
5184 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5185 		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5186 		    &reply->IOCLogInfo);
5187 		mptsas_log(mpt, CE_NOTE,
5188 		    "?Log info 0x%x received for target %d.\n"
5189 		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5190 		    loginfo, Tgt(cmd), scsi_status, ioc_status,
5191 		    scsi_state);
5192 	}
5193 
5194 	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5195 	    scsi_status, ioc_status, scsi_state));
5196 
5197 	pkt = CMD2PKT(cmd);
5198 	*(pkt->pkt_scbp) = scsi_status;
5199 
5200 	if (loginfo == 0x31170000) {
5201 		/*
5202 		 * A loginfo of PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5203 		 * (0x31170000) means that the device missing delay is in
5204 		 * progress, so the command needs to be retried later.
5205 		 */
5206 		*(pkt->pkt_scbp) = STATUS_BUSY;
5207 		return;
5208 	}
5209 
5210 	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5211 	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5212 	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5213 		pkt->pkt_reason = CMD_INCOMPLETE;
5214 		pkt->pkt_state |= STATE_GOT_BUS;
5215 		if (ptgt->m_reset_delay == 0) {
5216 			mptsas_set_throttle(mpt, ptgt,
5217 			    DRAIN_THROTTLE);
5218 		}
5219 		return;
5220 	}
5221 
5222 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5223 		responsedata &= 0x000000FF;
5224 		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5225 			mptsas_log(mpt, CE_NOTE, "TLR is not supported\n");
5226 			pkt->pkt_reason = CMD_TLR_OFF;
5227 			return;
5228 		}
5229 	}
5230 
5231 
5232 	switch (scsi_status) {
5233 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5234 		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5235 		arqstat = (void*)(pkt->pkt_scbp);
5236 		arqstat->sts_rqpkt_status = *((struct scsi_status *)
5237 		    (pkt->pkt_scbp));
5238 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5239 		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5240 		if (cmd->cmd_flags & CFLAG_XARQ) {
5241 			pkt->pkt_state |= STATE_XARQ_DONE;
5242 		}
5243 		if (pkt->pkt_resid != cmd->cmd_dmacount) {
5244 			pkt->pkt_state |= STATE_XFERRED_DATA;
5245 		}
5246 		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5247 		arqstat->sts_rqpkt_state  = pkt->pkt_state;
5248 		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5249 		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5250 		sensedata = (uint8_t *)&arqstat->sts_sensedata;
5251 
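		/*
		 * Copy the sense bytes returned by the IOC into the ARQ area
		 * of the packet, limited to the smaller of the request sense
		 * length and the returned sense count.
		 */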
5252 		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5253 		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5254 		    cmd->cmd_rqslen));
5255 		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5256 		cmd->cmd_flags |= CFLAG_CMDARQ;
5257 		/*
5258 		 * Set proper status for pkt if autosense was valid
5259 		 */
5260 		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5261 			struct scsi_status zero_status = { 0 };
5262 			arqstat->sts_rqpkt_status = zero_status;
5263 		}
5264 
5265 		/*
5266 		 * ASC=0x47 is parity error
5267 		 * ASC=0x48 is initiator detected error received
5268 		 */
5269 		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5270 		    ((scsi_sense_asc(sensedata) == 0x47) ||
5271 		    (scsi_sense_asc(sensedata) == 0x48))) {
5272 			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5273 		}
5274 
5275 		/*
5276 		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5277 		 * ASC/ASCQ=0x25/0x00 means invalid lun
5278 		 */
5279 		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5280 		    (scsi_sense_asc(sensedata) == 0x3F) &&
5281 		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
5282 		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5283 		    (scsi_sense_asc(sensedata) == 0x25) &&
5284 		    (scsi_sense_ascq(sensedata) == 0x00))) {
5285 			mptsas_topo_change_list_t *topo_node = NULL;
5286 
5287 			topo_node = kmem_zalloc(
5288 			    sizeof (mptsas_topo_change_list_t),
5289 			    KM_NOSLEEP);
5290 			if (topo_node == NULL) {
5291 				mptsas_log(mpt, CE_NOTE, "No memory "
5292 				    "resource to handle SAS dynamic "
5293 				    "reconfigure.\n");
5294 				break;
5295 			}
5296 			topo_node->mpt = mpt;
5297 			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5298 			topo_node->un.phymask = ptgt->m_phymask;
5299 			topo_node->devhdl = ptgt->m_devhdl;
5300 			topo_node->object = (void *)ptgt;
5301 			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5302 
5303 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5304 			    mptsas_handle_dr,
5305 			    (void *)topo_node,
5306 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
5307 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
5308 				    "for SAS dynamic reconfigure "
5309 				    "failed.\n");
5310 			}
5311 		}
5312 		break;
5313 	case MPI2_SCSI_STATUS_GOOD:
5314 		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5315 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5316 			pkt->pkt_reason = CMD_DEV_GONE;
5317 			pkt->pkt_state |= STATE_GOT_BUS;
5318 			if (ptgt->m_reset_delay == 0) {
5319 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5320 			}
5321 			NDBG31(("lost disk for target%d, command:%x",
5322 			    Tgt(cmd), pkt->pkt_cdbp[0]));
5323 			break;
5324 		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5325 			NDBG31(("data overrun: xferred=%d", xferred));
5326 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5327 			pkt->pkt_reason = CMD_DATA_OVR;
5328 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5329 			    | STATE_SENT_CMD | STATE_GOT_STATUS
5330 			    | STATE_XFERRED_DATA);
5331 			pkt->pkt_resid = 0;
5332 			break;
5333 		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5334 		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5335 			NDBG31(("data underrun: xferred=%d", xferred));
5336 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5337 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5338 			    | STATE_SENT_CMD | STATE_GOT_STATUS);
5339 			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5340 			if (pkt->pkt_resid != cmd->cmd_dmacount) {
5341 				pkt->pkt_state |= STATE_XFERRED_DATA;
5342 			}
5343 			break;
5344 		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5345 			mptsas_set_pkt_reason(mpt,
5346 			    cmd, CMD_RESET, STAT_BUS_RESET);
5347 			break;
5348 		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5349 		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5350 			mptsas_set_pkt_reason(mpt,
5351 			    cmd, CMD_RESET, STAT_DEV_RESET);
5352 			break;
5353 		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5354 		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5355 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5356 			mptsas_set_pkt_reason(mpt,
5357 			    cmd, CMD_TERMINATED, STAT_TERMINATED);
5358 			break;
5359 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5360 		case MPI2_IOCSTATUS_BUSY:
5361 			/*
5362 			 * set throttles to drain
5363 			 */
5364 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5365 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5366 			while (ptgt != NULL) {
5367 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5368 
5369 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5370 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5371 			}
5372 
5373 			/*
5374 			 * retry command
5375 			 */
5376 			cmd->cmd_flags |= CFLAG_RETRY;
5377 			cmd->cmd_pkt_flags |= FLAG_HEAD;
5378 
5379 			(void) mptsas_accept_pkt(mpt, cmd);
5380 			break;
5381 		default:
5382 			mptsas_log(mpt, CE_WARN,
5383 			    "unknown ioc_status = %x\n", ioc_status);
5384 			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5385 			    "count = %x, scsi_status = %x", scsi_state,
5386 			    xferred, scsi_status);
5387 			break;
5388 		}
5389 		break;
5390 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5391 		mptsas_handle_qfull(mpt, cmd);
5392 		break;
5393 	case MPI2_SCSI_STATUS_BUSY:
5394 		NDBG31(("scsi_status busy received"));
5395 		break;
5396 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5397 		NDBG31(("scsi_status reservation conflict received"));
5398 		break;
5399 	default:
5400 		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5401 		    scsi_status, ioc_status);
5402 		mptsas_log(mpt, CE_WARN,
5403 		    "mptsas_process_intr: invalid scsi status\n");
5404 		break;
5405 	}
5406 }
5407 
5408 static void
5409 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5410 	mptsas_cmd_t *cmd)
5411 {
5412 	uint8_t		task_type;
5413 	uint16_t	ioc_status;
5414 	uint32_t	log_info;
5415 	uint16_t	dev_handle;
5416 	struct scsi_pkt *pkt = CMD2PKT(cmd);
5417 
5418 	task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5419 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5420 	log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5421 	dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5422 
5423 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5424 		mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5425 		    "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5426 		    task_type, ioc_status, log_info, dev_handle);
5427 		pkt->pkt_reason = CMD_INCOMPLETE;
5428 		return;
5429 	}
5430 
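	/*
	 * For task management types that abort or reset outstanding I/O,
	 * flush the affected target's commands; the query-style types need
	 * no further handling here.
	 */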
5431 	switch (task_type) {
5432 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5433 	case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5434 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5435 	case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5436 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5437 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5438 		break;
5439 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5440 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5441 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5442 		mptsas_flush_target(mpt, dev_handle, Lun(cmd), task_type);
5443 		break;
5444 	default:
5445 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5446 		    task_type);
5447 		mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5448 		break;
5449 	}
5450 }
5451 
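/*
 * Helper thread that drains its private done queue and completes the
 * packets outside of interrupt context.
 */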
5452 static void
5453 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5454 {
5455 	mptsas_t			*mpt = arg->mpt;
5456 	uint64_t			t = arg->t;
5457 	mptsas_cmd_t			*cmd;
5458 	struct scsi_pkt			*pkt;
5459 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
5460 
5461 	mutex_enter(&item->mutex);
5462 	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
5463 		if (!item->doneq) {
5464 			cv_wait(&item->cv, &item->mutex);
5465 		}
5466 		pkt = NULL;
5467 		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5468 			cmd->cmd_flags |= CFLAG_COMPLETED;
5469 			pkt = CMD2PKT(cmd);
5470 		}
5471 		mutex_exit(&item->mutex);
5472 		if (pkt) {
5473 			mptsas_pkt_comp(pkt, cmd);
5474 		}
5475 		mutex_enter(&item->mutex);
5476 	}
5477 	mutex_exit(&item->mutex);
5478 	mutex_enter(&mpt->m_doneq_mutex);
5479 	mpt->m_doneq_thread_n--;
5480 	cv_broadcast(&mpt->m_doneq_thread_cv);
5481 	mutex_exit(&mpt->m_doneq_mutex);
5482 }
5483 
5484 
5485 /*
5486  * mpt interrupt handler.
5487  */
5488 static uint_t
5489 mptsas_intr(caddr_t arg1, caddr_t arg2)
5490 {
5491 	mptsas_t			*mpt = (void *)arg1;
5492 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5493 	uchar_t				did_reply = FALSE;
5494 
5495 	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5496 
5497 	mutex_enter(&mpt->m_mutex);
5498 
5499 	/*
5500 	 * If interrupts are shared by two channels then check whether this
5501 	 * interrupt is genuinely for this channel by first making sure the
5502 	 * chip is in the high power state.
5503 	 */
5504 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
5505 	    (mpt->m_power_level != PM_LEVEL_D0)) {
5506 		mutex_exit(&mpt->m_mutex);
5507 		return (DDI_INTR_UNCLAIMED);
5508 	}
5509 
5510 	/*
5511 	 * If polling, interrupt was triggered by some shared interrupt because
5512 	 * IOC interrupts are disabled during polling, so polling routine will
5513 	 * handle any replies.  Considering this, if polling is happening,
5514 	 * return with interrupt unclaimed.
5515 	 */
5516 	if (mpt->m_polled_intr) {
5517 		mutex_exit(&mpt->m_mutex);
5518 		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5519 		return (DDI_INTR_UNCLAIMED);
5520 	}
5521 
5522 	/*
5523 	 * Check the interrupt status register.
5524 	 */
5525 	if ((INTPENDING(mpt)) != 0) {
5526 		/*
5527 		 * read fifo until empty.
5528 		 */
5529 #ifndef __lock_lint
5530 		_NOTE(CONSTCOND)
5531 #endif
5532 		while (TRUE) {
5533 			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5534 			    DDI_DMA_SYNC_FORCPU);
5535 			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5536 			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5537 
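			/*
			 * Reply descriptors are reset to all ones after being
			 * processed, so a word of 0xFFFFFFFF means there are
			 * no more new replies in the post queue.
			 */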
5538 			if (ddi_get32(mpt->m_acc_post_queue_hdl,
5539 			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5540 			    ddi_get32(mpt->m_acc_post_queue_hdl,
5541 			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5542 				break;
5543 			}
5544 
5545 			/*
5546 			 * The reply is valid, process it according to its
5547 			 * type.  Also, set a flag for updating the reply index
5548 			 * after they've all been processed.
5549 			 */
5550 			did_reply = TRUE;
5551 
5552 			mptsas_process_intr(mpt, reply_desc_union);
5553 
5554 			/*
5555 			 * Increment post index and roll over if needed.
5556 			 */
5557 			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5558 				mpt->m_post_index = 0;
5559 			}
5560 		}
5561 
5562 		/*
5563 		 * Update the global reply index if at least one reply was
5564 		 * processed.
5565 		 */
5566 		if (did_reply) {
5567 			ddi_put32(mpt->m_datap,
5568 			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5569 		}
5570 	} else {
5571 		mutex_exit(&mpt->m_mutex);
5572 		return (DDI_INTR_UNCLAIMED);
5573 	}
5574 	NDBG1(("mptsas_intr complete"));
5575 
5576 	/*
5577 	 * If no helper threads are created, process the doneq in ISR. If
5578 	 * helpers are created, use the doneq length as a metric to measure the
5579 	 * load on the interrupt CPU. If it is long enough, which indicates the
5580 	 * load is heavy, then we deliver the IO completions to the helpers.
5581 	 * This measurement has some limitations, although it is simple and
5582 	 * straightforward and works well for most of the cases at present.
5583 	 */
5584 	if (!mpt->m_doneq_thread_n ||
5585 	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5586 		mptsas_doneq_empty(mpt);
5587 	} else {
5588 		mptsas_deliver_doneq_thread(mpt);
5589 	}
5590 
5591 	/*
5592 	 * If there are queued cmds, start them now.
5593 	 */
5594 	if (mpt->m_waitq != NULL) {
5595 		mptsas_restart_waitq(mpt);
5596 	}
5597 
5598 	mutex_exit(&mpt->m_mutex);
5599 	return (DDI_INTR_CLAIMED);
5600 }
5601 
5602 static void
5603 mptsas_process_intr(mptsas_t *mpt,
5604     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5605 {
5606 	uint8_t	reply_type;
5607 
5608 	ASSERT(mutex_owned(&mpt->m_mutex));
5609 
5610 	/*
5611 	 * The reply is valid, process it according to its
5612 	 * type.  Also, set a flag for updating the reply index
5613 	 * after they've all been processed.
5614 	 */
5615 	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5616 	    &reply_desc_union->Default.ReplyFlags);
5617 	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5618 	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5619 		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5620 	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5621 		mptsas_handle_address_reply(mpt, reply_desc_union);
5622 	} else {
5623 		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5624 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5625 	}
5626 
5627 	/*
5628 	 * Clear the reply descriptor for re-use and increment
5629 	 * index.
5630 	 */
5631 	ddi_put64(mpt->m_acc_post_queue_hdl,
5632 	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5633 	    0xFFFFFFFFFFFFFFFF);
5634 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5635 	    DDI_DMA_SYNC_FORDEV);
5636 }
5637 
5638 /*
5639  * handle qfull condition
5640  */
5641 static void
5642 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5643 {
5644 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
5645 
5646 	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5647 	    (ptgt->m_qfull_retries == 0)) {
5648 		/*
5649 		 * We have exhausted the retries on QFULL, or,
5650 		 * the target driver has indicated that it
5651 		 * wants to handle QFULL itself by setting
5652 		 * qfull-retries capability to 0. In either case
5653 		 * we want the target driver's QFULL handling
5654 		 * to kick in. We do this by having pkt_reason
5655 		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5656 		 */
5657 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5658 	} else {
5659 		if (ptgt->m_reset_delay == 0) {
5660 			ptgt->m_t_throttle =
5661 			    max((ptgt->m_t_ncmds - 2), 0);
5662 		}
5663 
5664 		cmd->cmd_pkt_flags |= FLAG_HEAD;
5665 		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5666 		cmd->cmd_flags |= CFLAG_RETRY;
5667 
5668 		(void) mptsas_accept_pkt(mpt, cmd);
5669 
5670 		/*
5671 		 * When the target returns queue full status with no commands
5672 		 * outstanding (m_t_ncmds == 0), the throttle is set to 0
5673 		 * (HOLD_THROTTLE), and queue full handling starts
5674 		 * (see psarc/1994/313); if there are commands outstanding,
5675 		 * the throttle is set to (m_t_ncmds - 2).
5676 		 */
5677 		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5678 			/*
5679 			 * By setting throttle to QFULL_THROTTLE, we
5680 			 * avoid submitting new commands and in
5681 			 * mptsas_restart_cmd find out slots which need
5682 			 * their throttles to be cleared.
5683 			 */
5684 			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5685 			if (mpt->m_restart_cmd_timeid == 0) {
5686 				mpt->m_restart_cmd_timeid =
5687 				    timeout(mptsas_restart_cmd, mpt,
5688 				    ptgt->m_qfull_retry_interval);
5689 			}
5690 		}
5691 	}
5692 }
5693 
5694 mptsas_phymask_t
5695 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5696 {
5697 	mptsas_phymask_t	phy_mask = 0;
5698 	uint8_t			i = 0;
5699 
5700 	NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5701 
5702 	ASSERT(mutex_owned(&mpt->m_mutex));
5703 
5704 	/*
5705 	 * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
5706 	 */
5707 	if (physport == 0xFF) {
5708 		return (0);
5709 	}
5710 
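	/*
	 * Scan the PHY info array for an attached PHY whose port number
	 * matches the given physical port and return its phymask.
	 */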
5711 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5712 		if (mpt->m_phy_info[i].attached_devhdl &&
5713 		    (mpt->m_phy_info[i].phy_mask != 0) &&
5714 		    (mpt->m_phy_info[i].port_num == physport)) {
5715 			phy_mask = mpt->m_phy_info[i].phy_mask;
5716 			break;
5717 		}
5718 	}
5719 	NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5720 	    mpt->m_instance, physport, phy_mask));
5721 	return (phy_mask);
5722 }
5723 
5724 /*
5725  * Free a device handle after the device is gone, via a passthrough request.
5726  */
5727 static int
5728 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5729 {
5730 	Mpi2SasIoUnitControlRequest_t	req;
5731 	Mpi2SasIoUnitControlReply_t	rep;
5732 	int				ret;
5733 
5734 	ASSERT(mutex_owned(&mpt->m_mutex));
5735 
5736 	/*
5737 	 * Need to compose a SAS IO Unit Control request message
5738 	 * and call mptsas_do_passthru() function
5739 	 */
5740 	bzero(&req, sizeof (req));
5741 	bzero(&rep, sizeof (rep));
5742 
5743 	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5744 	req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5745 	req.DevHandle = LE_16(devhdl);
5746 
5747 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5748 	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5749 	if (ret != 0) {
5750 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5751 		    "Control error %d", ret);
5752 		return (DDI_FAILURE);
5753 	}
5754 
5755 	/* The passthrough succeeded; now check the IOC status. */
5756 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5757 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5758 		    "Control IOCStatus %d", LE_16(rep.IOCStatus));
5759 		return (DDI_FAILURE);
5760 	}
5761 
5762 	return (DDI_SUCCESS);
5763 }
5764 
5765 static void
5766 mptsas_update_phymask(mptsas_t *mpt)
5767 {
5768 	mptsas_phymask_t mask = 0, phy_mask;
5769 	char		*phy_mask_name;
5770 	uint8_t		current_port;
5771 	int		i, j;
5772 
5773 	NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5774 
5775 	ASSERT(mutex_owned(&mpt->m_mutex));
5776 
5777 	(void) mptsas_get_sas_io_unit_page(mpt);
5778 
5779 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5780 
5781 	for (i = 0; i < mpt->m_num_phys; i++) {
5782 		phy_mask = 0x00;
5783 
5784 		if (mpt->m_phy_info[i].attached_devhdl == 0)
5785 			continue;
5786 
5787 		bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5788 
5789 		current_port = mpt->m_phy_info[i].port_num;
5790 
5791 		if ((mask & (1 << i)) != 0)
5792 			continue;
5793 
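		/*
		 * Collect all attached PHYs that belong to the same physical
		 * port into a single phymask.
		 */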
5794 		for (j = 0; j < mpt->m_num_phys; j++) {
5795 			if (mpt->m_phy_info[j].attached_devhdl &&
5796 			    (mpt->m_phy_info[j].port_num == current_port)) {
5797 				phy_mask |= (1 << j);
5798 			}
5799 		}
5800 		mask = mask | phy_mask;
5801 
5802 		for (j = 0; j < mpt->m_num_phys; j++) {
5803 			if ((phy_mask >> j) & 0x01) {
5804 				mpt->m_phy_info[j].phy_mask = phy_mask;
5805 			}
5806 		}
5807 
5808 		(void) sprintf(phy_mask_name, "%x", phy_mask);
5809 
5810 		mutex_exit(&mpt->m_mutex);
5811 		/*
5812 		 * Register an iport; if the port already exists,
5813 		 * SCSA will do nothing and just return.
5814 		 */
5815 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5816 		mutex_enter(&mpt->m_mutex);
5817 	}
5818 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5819 	NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5820 }
5821 
5822 /*
5823  * mptsas_handle_dr is a task handler for DR; the DR actions include:
5824  * 1. Directly attached device added/removed.
5825  * 2. Expander device added/removed.
5826  * 3. Indirectly attached device added/removed.
5827  * 4. LUN status change on an existing device.
5828  * 5. RAID volume created/deleted.
5829  * 6. Member of RAID volume is released because of RAID deletion.
5830  * 7. Physical disks are removed because of RAID creation.
5831  */
5832 static void
5833 mptsas_handle_dr(void *args) {
5834 	mptsas_topo_change_list_t	*topo_node = NULL;
5835 	mptsas_topo_change_list_t	*save_node = NULL;
5836 	mptsas_t			*mpt;
5837 	dev_info_t			*parent = NULL;
5838 	mptsas_phymask_t		phymask = 0;
5839 	char				*phy_mask_name;
5840 	uint8_t				flags = 0, physport = 0xff;
5841 	uint8_t				port_update = 0;
5842 	uint_t				event;
5843 
5844 	topo_node = (mptsas_topo_change_list_t *)args;
5845 
5846 	mpt = topo_node->mpt;
5847 	event = topo_node->event;
5848 	flags = topo_node->flags;
5849 
5850 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5851 
5852 	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
5853 
5854 	switch (event) {
5855 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5856 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5857 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
5858 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
5859 			/*
5860 			 * A directly attached or expander attached device was
5861 			 * added to the system, or a Phys Disk is being unhidden.
5862 			 */
5863 			port_update = 1;
5864 		}
5865 		break;
5866 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
5867 		/*
5868 		 * A new expander was added to the system; it must be the head
5869 		 * of the topo_change_list_t.
5870 		 */
5871 		port_update = 1;
5872 		break;
5873 	default:
5874 		port_update = 0;
5875 		break;
5876 	}
5877 	/*
5878 	 * Any case with port_update == 1 may change the initiator port configuration.
5879 	 */
5880 	mutex_enter(&mpt->m_mutex);
5881 	if (mpt->m_port_chng && port_update) {
5882 		/*
5883 		 * The mpt->m_port_chng flag indicates that some PHYs of the
5884 		 * initiator port have come online.  So when an expander-added
5885 		 * or directly-attached-device-online event arrives, force an
5886 		 * update of the port information by issuing a SAS IO Unit Page
5887 		 * request and updating the PHYMASKs.
5888 		 */
5889 		(void) mptsas_update_phymask(mpt);
5890 		mpt->m_port_chng = 0;
5891 
5892 	}
5893 	mutex_exit(&mpt->m_mutex);
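	/*
	 * Walk the list of topology change nodes, resolving the parent iport
	 * for each node before handling the change, and free each node once
	 * it has been processed.
	 */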
5894 	while (topo_node) {
5895 		phymask = 0;
5896 		if (parent == NULL) {
5897 			physport = topo_node->un.physport;
5898 			event = topo_node->event;
5899 			flags = topo_node->flags;
5900 			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
5901 			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
5902 				/*
5903 				 * For all offline events, phymask is known
5904 				 */
5905 				phymask = topo_node->un.phymask;
5906 				goto find_parent;
5907 			}
5908 			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5909 				goto handle_topo_change;
5910 			}
5911 			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
5912 				phymask = topo_node->un.phymask;
5913 				goto find_parent;
5914 			}
5915 
5916 			if ((flags ==
5917 			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
5918 			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
5919 				/*
5920 				 * No field in the IR_CONFIG_CHANGE event
5921 				 * indicates physport/phynum, so get the
5922 				 * parent after the SAS Device Page0 request.
5923 				 */
5924 				goto handle_topo_change;
5925 			}
5926 
5927 			mutex_enter(&mpt->m_mutex);
5928 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5929 				/*
5930 				 * If a directly attached device was added or a
5931 				 * phys disk is being unhidden, the physport
5932 				 * argument is actually the PHY#, so look up
5933 				 * the phymask according to that PHY#.
5934 				 */
5935 				physport = mpt->m_phy_info[physport].port_num;
5936 			}
5937 
5938 			/*
5939 			 * Translate physport to phymask so that we can search
5940 			 * parent dip.
5941 			 */
5942 			phymask = mptsas_physport_to_phymask(mpt,
5943 			    physport);
5944 			mutex_exit(&mpt->m_mutex);
5945 
5946 find_parent:
5947 			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5948 			/*
5949 			 * For RAID topology change node, write the iport name
5950 			 * as v0.
5951 			 */
5952 			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5953 				(void) sprintf(phy_mask_name, "v0");
5954 			} else {
5955 				/*
5956 				 * phymask can be 0 if the drive has been
5957 				 * pulled by the time an add event is
5958 				 * processed.  If phymask is 0, just skip this
5959 				 * event and continue.
5960 				 */
5961 				if (phymask == 0) {
5962 					mutex_enter(&mpt->m_mutex);
5963 					save_node = topo_node;
5964 					topo_node = topo_node->next;
5965 					ASSERT(save_node);
5966 					kmem_free(save_node,
5967 					    sizeof (mptsas_topo_change_list_t));
5968 					mutex_exit(&mpt->m_mutex);
5969 
5970 					parent = NULL;
5971 					continue;
5972 				}
5973 				(void) sprintf(phy_mask_name, "%x", phymask);
5974 			}
5975 			parent = scsi_hba_iport_find(mpt->m_dip,
5976 			    phy_mask_name);
5977 			if (parent == NULL) {
5978 				mptsas_log(mpt, CE_WARN, "Failed to find an "
5979 				    "iport, should not happen!");
5980 				goto out;
5981 			}
5982 
5983 		}
5984 		ASSERT(parent);
5985 handle_topo_change:
5986 
5987 		mutex_enter(&mpt->m_mutex);
5988 
5989 		mptsas_handle_topo_change(topo_node, parent);
5990 		save_node = topo_node;
5991 		topo_node = topo_node->next;
5992 		ASSERT(save_node);
5993 		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5994 		mutex_exit(&mpt->m_mutex);
5995 
5996 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5997 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5998 		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5999 			/*
6000 			 * If the node is associated with a directly attached
6001 			 * device, reset the parent before starting the next
6002 			 * one; all devices associated with an expander share
6003 			 * the parent.  Also reset the parent if this is for RAID.
6004 			 */
6005 			parent = NULL;
6006 		}
6007 	}
6008 out:
6009 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6010 }
6011 
6012 static void
6013 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6014     dev_info_t *parent)
6015 {
6016 	mptsas_target_t	*ptgt = NULL;
6017 	mptsas_smp_t	*psmp = NULL;
6018 	mptsas_t	*mpt = (void *)topo_node->mpt;
6019 	uint16_t	devhdl;
6020 	uint16_t	attached_devhdl;
6021 	uint64_t	sas_wwn = 0;
6022 	int		rval = 0;
6023 	uint32_t	page_address;
6024 	uint8_t		phy, flags;
6025 	char		*addr = NULL;
6026 	dev_info_t	*lundip;
6027 	int		circ = 0, circ1 = 0;
6028 	char		attached_wwnstr[MPTSAS_WWN_STRLEN];
6029 
6030 	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
6031 
6032 	ASSERT(mutex_owned(&mpt->m_mutex));
6033 
6034 	switch (topo_node->event) {
6035 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6036 	{
6037 		char *phy_mask_name;
6038 		mptsas_phymask_t phymask = 0;
6039 
6040 		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6041 			/*
6042 			 * Get latest RAID info.
6043 			 */
6044 			(void) mptsas_get_raid_info(mpt);
6045 			ptgt = mptsas_search_by_devhdl(
6046 			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
6047 			if (ptgt == NULL)
6048 				break;
6049 		} else {
6050 			ptgt = (void *)topo_node->object;
6051 		}
6052 
6053 		if (ptgt == NULL) {
6054 			/*
6055 			 * If a Phys Disk was deleted, RAID info needs to be
6056 			 * updated to reflect the new topology.
6057 			 */
6058 			(void) mptsas_get_raid_info(mpt);
6059 
6060 			/*
6061 			 * Get SAS device page 0 by DevHandle to check whether
6062 			 * an SSP/SATA end device exists.
6063 			 */
6064 			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6065 			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6066 			    topo_node->devhdl;
6067 
6068 			rval = mptsas_get_target_device_info(mpt, page_address,
6069 			    &devhdl, &ptgt);
6070 			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6071 				mptsas_log(mpt, CE_NOTE,
6072 				    "mptsas_handle_topo_change: target %d is "
6073 				    "not a SAS/SATA device. \n",
6074 				    topo_node->devhdl);
6075 			} else if (rval == DEV_INFO_FAIL_ALLOC) {
6076 				mptsas_log(mpt, CE_NOTE,
6077 				    "mptsas_handle_topo_change: could not "
6078 				    "allocate memory. \n");
6079 			}
6080 			/*
6081 			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
6082 			 * else to do, just leave.
6083 			 */
6084 			if (rval != DEV_INFO_SUCCESS) {
6085 				return;
6086 			}
6087 		}
6088 
6089 		ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6090 
6091 		mutex_exit(&mpt->m_mutex);
6092 		flags = topo_node->flags;
6093 
6094 		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6095 			phymask = ptgt->m_phymask;
6096 			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6097 			(void) sprintf(phy_mask_name, "%x", phymask);
6098 			parent = scsi_hba_iport_find(mpt->m_dip,
6099 			    phy_mask_name);
6100 			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6101 			if (parent == NULL) {
6102 				mptsas_log(mpt, CE_WARN, "Failed to find an "
6103 				    "iport for PD, should not happen!");
6104 				mutex_enter(&mpt->m_mutex);
6105 				break;
6106 			}
6107 		}
6108 
6109 		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6110 			ndi_devi_enter(parent, &circ1);
6111 			(void) mptsas_config_raid(parent, topo_node->devhdl,
6112 			    &lundip);
6113 			ndi_devi_exit(parent, circ1);
6114 		} else {
6115 			/*
6116 			 * hold nexus for bus configure
6117 			 */
6118 			ndi_devi_enter(scsi_vhci_dip, &circ);
6119 			ndi_devi_enter(parent, &circ1);
6120 			rval = mptsas_config_target(parent, ptgt);
6121 			/*
6122 			 * release nexus for bus configure
6123 			 */
6124 			ndi_devi_exit(parent, circ1);
6125 			ndi_devi_exit(scsi_vhci_dip, circ);
6126 
6127 			/*
6128 			 * Add parent's props for SMHBA support
6129 			 */
6130 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6131 				bzero(attached_wwnstr,
6132 				    sizeof (attached_wwnstr));
6133 				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
6134 				    ptgt->m_sas_wwn);
6135 				if (ddi_prop_update_string(DDI_DEV_T_NONE,
6136 				    parent,
6137 				    SCSI_ADDR_PROP_ATTACHED_PORT,
6138 				    attached_wwnstr)
6139 				    != DDI_PROP_SUCCESS) {
6140 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6141 					    parent,
6142 					    SCSI_ADDR_PROP_ATTACHED_PORT);
6143 					mptsas_log(mpt, CE_WARN, "Failed to"
6144 					    " create attached-port props");
6145 					return;
6146 				}
6147 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6148 				    MPTSAS_NUM_PHYS, 1) !=
6149 				    DDI_PROP_SUCCESS) {
6150 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6151 					    parent, MPTSAS_NUM_PHYS);
6152 					mptsas_log(mpt, CE_WARN, "Failed to"
6153 					    " create num-phys props");
6154 					return;
6155 				}
6156 
6157 				/*
6158 				 * Update PHY info for smhba
6159 				 */
6160 				mutex_enter(&mpt->m_mutex);
6161 				if (mptsas_smhba_phy_init(mpt)) {
6162 					mutex_exit(&mpt->m_mutex);
6163 					mptsas_log(mpt, CE_WARN, "mptsas phy"
6164 					    " update failed");
6165 					return;
6166 				}
6167 				mutex_exit(&mpt->m_mutex);
6168 				mptsas_smhba_set_phy_props(mpt,
6169 				    ddi_get_name_addr(parent), parent,
6170 				    1, &attached_devhdl);
6171 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6172 				    MPTSAS_VIRTUAL_PORT, 0) !=
6173 				    DDI_PROP_SUCCESS) {
6174 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6175 					    parent, MPTSAS_VIRTUAL_PORT);
6176 					mptsas_log(mpt, CE_WARN,
6177 					    "mptsas virtual-port "
6178 					    "prop update failed");
6179 					return;
6180 				}
6181 			}
6182 		}
6183 		mutex_enter(&mpt->m_mutex);
6184 
6185 		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6186 		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6187 		    ptgt->m_phymask));
6188 		break;
6189 	}
6190 	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6191 	{
6192 		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
6193 		devhdl = topo_node->devhdl;
6194 		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
6195 		if (ptgt == NULL)
6196 			break;
6197 
6198 		sas_wwn = ptgt->m_sas_wwn;
6199 		phy = ptgt->m_phynum;
6200 
6201 		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6202 
6203 		if (sas_wwn) {
6204 			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6205 		} else {
6206 			(void) sprintf(addr, "p%x", phy);
6207 		}
6208 		ASSERT(ptgt->m_devhdl == devhdl);
6209 
6210 		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6211 		    (topo_node->flags ==
6212 		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6213 			/*
6214 			 * Get latest RAID info if RAID volume status changes
6215 			 * or Phys Disk status changes
6216 			 */
6217 			(void) mptsas_get_raid_info(mpt);
6218 		}
6219 		/*
6220 		 * Abort all outstanding commands on the device.
6221 		 */
6222 		rval = mptsas_do_scsi_reset(mpt, devhdl);
6223 		if (rval) {
6224 			NDBG20(("mptsas%d handle_topo_change to reset target "
6225 			    "before offline devhdl:%x, phymask:%x, rval:%x",
6226 			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
6227 			    rval));
6228 		}
6229 
6230 		mutex_exit(&mpt->m_mutex);
6231 
6232 		ndi_devi_enter(scsi_vhci_dip, &circ);
6233 		ndi_devi_enter(parent, &circ1);
6234 		rval = mptsas_offline_target(parent, addr);
6235 		ndi_devi_exit(parent, circ1);
6236 		ndi_devi_exit(scsi_vhci_dip, circ);
6237 		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6238 		    "phymask:%x, rval:%x", mpt->m_instance,
6239 		    ptgt->m_devhdl, ptgt->m_phymask, rval));
6240 
6241 		kmem_free(addr, SCSI_MAXNAMELEN);
6242 
6243 		/*
6244 		 * Clear parent's props for SMHBA support
6245 		 */
6246 		flags = topo_node->flags;
6247 		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6248 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
6249 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6250 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6251 			    DDI_PROP_SUCCESS) {
6252 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6253 				    SCSI_ADDR_PROP_ATTACHED_PORT);
6254 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
6255 				    "prop update failed");
6256 				break;
6257 			}
6258 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6259 			    MPTSAS_NUM_PHYS, 0) !=
6260 			    DDI_PROP_SUCCESS) {
6261 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6262 				    MPTSAS_NUM_PHYS);
6263 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
6264 				    "prop update failed");
6265 				break;
6266 			}
6267 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6268 			    MPTSAS_VIRTUAL_PORT, 1) !=
6269 			    DDI_PROP_SUCCESS) {
6270 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6271 				    MPTSAS_VIRTUAL_PORT);
6272 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6273 				    "prop update failed");
6274 				break;
6275 			}
6276 		}
6277 
6278 		mutex_enter(&mpt->m_mutex);
6279 		if (rval == DDI_SUCCESS) {
6280 			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
6281 			    ptgt->m_sas_wwn, ptgt->m_phymask);
6282 			ptgt = NULL;
6283 		} else {
6284 			/*
6285 			 * Clear the DR_INTRANSITION flag to allow I/O down to
6286 			 * the pHCI driver since failover has finished.
6287 			 * Invalidate the devhdl.
6288 			 */
6289 			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6290 			ptgt->m_tgt_unconfigured = 0;
6291 			mutex_enter(&mpt->m_tx_waitq_mutex);
6292 			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6293 			mutex_exit(&mpt->m_tx_waitq_mutex);
6294 		}
6295 
6296 		/*
6297 		 * Send SAS IO Unit Control to free the dev handle
6298 		 */
6299 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6300 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6301 			rval = mptsas_free_devhdl(mpt, devhdl);
6302 
6303 			NDBG20(("mptsas%d handle_topo_change to remove "
6304 			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6305 			    rval));
6306 		}
6307 
6308 		break;
6309 	}
6310 	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6311 	{
6312 		devhdl = topo_node->devhdl;
6313 		/*
6314 		 * If this is the remove handle event, do a reset first.
6315 		 */
6316 		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6317 			rval = mptsas_do_scsi_reset(mpt, devhdl);
6318 			if (rval) {
6319 				NDBG20(("mpt%d reset target before remove "
6320 				    "devhdl:%x, rval:%x", mpt->m_instance,
6321 				    devhdl, rval));
6322 			}
6323 		}
6324 
6325 		/*
6326 		 * Send SAS IO Unit Control to free the dev handle
6327 		 */
6328 		rval = mptsas_free_devhdl(mpt, devhdl);
6329 		NDBG20(("mptsas%d handle_topo_change to remove "
6330 		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6331 		    rval));
6332 		break;
6333 	}
6334 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
6335 	{
6336 		mptsas_smp_t smp;
6337 		dev_info_t *smpdip;
6338 		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6339 
6340 		devhdl = topo_node->devhdl;
6341 
6342 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6343 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6344 		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6345 		if (rval != DDI_SUCCESS) {
6346 			mptsas_log(mpt, CE_WARN, "failed to online smp, "
6347 			    "handle %x", devhdl);
6348 			return;
6349 		}
6350 
6351 		psmp = mptsas_smp_alloc(smptbl, &smp);
6352 		if (psmp == NULL) {
6353 			return;
6354 		}
6355 
6356 		mutex_exit(&mpt->m_mutex);
6357 		ndi_devi_enter(parent, &circ1);
6358 		(void) mptsas_online_smp(parent, psmp, &smpdip);
6359 		ndi_devi_exit(parent, circ1);
6360 
6361 		mutex_enter(&mpt->m_mutex);
6362 		break;
6363 	}
6364 	case MPTSAS_DR_EVENT_OFFLINE_SMP:
6365 	{
6366 		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6367 		uint32_t dev_info;
6368 		devhdl = topo_node->devhdl;
6369 
6370 		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
6371 		if (psmp == NULL)
6372 			break;
6373 		/*
6374 		 * The mptsas_smp_t data is released only if the dip is offlined
6375 		 * successfully.
6376 		 */
6377 		mutex_exit(&mpt->m_mutex);
6378 
6379 		ndi_devi_enter(parent, &circ1);
6380 		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6381 		ndi_devi_exit(parent, circ1);
6382 
6383 		dev_info = psmp->m_deviceinfo;
6384 		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6385 		    DEVINFO_DIRECT_ATTACHED) {
6386 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6387 			    MPTSAS_VIRTUAL_PORT, 1) !=
6388 			    DDI_PROP_SUCCESS) {
6389 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6390 				    MPTSAS_VIRTUAL_PORT);
6391 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6392 				    "prop update failed");
6393 				return;
6394 			}
6395 			/*
6396 			 * Check whether the SMP is connected to the iport.
6397 			 */
6398 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6399 			    MPTSAS_NUM_PHYS, 0) !=
6400 			    DDI_PROP_SUCCESS) {
6401 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6402 				    MPTSAS_NUM_PHYS);
6403 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
6404 				    "prop update failed");
6405 				return;
6406 			}
6407 			/*
6408 			 * Clear parent's attached-port props
6409 			 */
6410 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
6411 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6412 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6413 			    DDI_PROP_SUCCESS) {
6414 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6415 				    SCSI_ADDR_PROP_ATTACHED_PORT);
6416 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
6417 				    "prop update failed");
6418 				return;
6419 			}
6420 		}
6421 
6422 		mutex_enter(&mpt->m_mutex);
6423 		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6424 		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6425 		if (rval == DDI_SUCCESS) {
6426 			mptsas_smp_free(smptbl, psmp->m_sasaddr,
6427 			    psmp->m_phymask);
6428 		} else {
6429 			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6430 		}
6431 
6432 		bzero(attached_wwnstr, sizeof (attached_wwnstr));
6433 
6434 		break;
6435 	}
6436 	default:
6437 		return;
6438 	}
6439 }
6440 
6441 /*
6442  * Record the event if its type has been enabled in the mpt instance by ioctl.
6443  */
6444 static void
6445 mptsas_record_event(void *args)
6446 {
6447 	m_replyh_arg_t			*replyh_arg;
6448 	pMpi2EventNotificationReply_t	eventreply;
6449 	uint32_t			event, rfm;
6450 	mptsas_t			*mpt;
6451 	int				i, j;
6452 	uint16_t			event_data_len;
6453 	boolean_t			sendAEN = FALSE;
6454 
6455 	replyh_arg = (m_replyh_arg_t *)args;
6456 	rfm = replyh_arg->rfm;
6457 	mpt = replyh_arg->mpt;
6458 
6459 	eventreply = (pMpi2EventNotificationReply_t)
6460 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6461 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6462 
6463 
6464 	/*
6465 	 * Generate a system event to let anyone who cares know that a
6466 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
6467 	 * event mask is set to.
6468 	 */
6469 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6470 		sendAEN = TRUE;
6471 	}
6472 
6473 	/*
6474 	 * Record the event only if it is not masked.  Determine which dword
6475 	 * and bit of event mask to test.
6476 	 */
6477 	i = (uint8_t)(event / 32);
6478 	j = (uint8_t)(event % 32);
6479 	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6480 		i = mpt->m_event_index;
6481 		mpt->m_events[i].Type = event;
6482 		mpt->m_events[i].Number = ++mpt->m_event_number;
6483 		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6484 		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6485 		    &eventreply->EventDataLength);
6486 
6487 		if (event_data_len > 0) {
6488 			/*
6489 			 * Limit data to size in m_event entry
6490 			 */
6491 			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6492 				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6493 			}
6494 			for (j = 0; j < event_data_len; j++) {
6495 				mpt->m_events[i].Data[j] =
6496 				    ddi_get32(mpt->m_acc_reply_frame_hdl,
6497 				    &(eventreply->EventData[j]));
6498 			}
6499 
6500 			/*
6501 			 * check for index wrap-around
6502 			 */
6503 			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6504 				i = 0;
6505 			}
6506 			mpt->m_event_index = (uint8_t)i;
6507 
6508 			/*
6509 			 * Set flag to send the event.
6510 			 */
6511 			sendAEN = TRUE;
6512 		}
6513 	}
6514 
6515 	/*
6516 	 * Generate a system event if flag is set to let anyone who cares know
6517 	 * that an event has occurred.
6518 	 */
6519 	if (sendAEN) {
6520 		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6521 		    "SAS", NULL, NULL, DDI_NOSLEEP);
6522 	}
6523 }
6524 
6525 #define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6526 /*
6527  * Handle sync events from the IOC in interrupt context.
6528  * return value:
6529  * DDI_SUCCESS: The event is handled by this func
6530  * DDI_FAILURE: Event is not handled
6531  */
6532 static int
6533 mptsas_handle_event_sync(void *args)
6534 {
6535 	m_replyh_arg_t			*replyh_arg;
6536 	pMpi2EventNotificationReply_t	eventreply;
6537 	uint32_t			event, rfm;
6538 	mptsas_t			*mpt;
6539 	uint_t				iocstatus;
6540 
6541 	replyh_arg = (m_replyh_arg_t *)args;
6542 	rfm = replyh_arg->rfm;
6543 	mpt = replyh_arg->mpt;
6544 
6545 	ASSERT(mutex_owned(&mpt->m_mutex));
6546 
6547 	eventreply = (pMpi2EventNotificationReply_t)
6548 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6549 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6550 
6551 	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6552 	    &eventreply->IOCStatus)) {
6553 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6554 			mptsas_log(mpt, CE_WARN,
6555 			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6556 			    "IOCLogInfo=0x%x", iocstatus,
6557 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6558 			    &eventreply->IOCLogInfo));
6559 		} else {
6560 			mptsas_log(mpt, CE_WARN,
6561 			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
6562 			    "IOCLogInfo=0x%x", iocstatus,
6563 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6564 			    &eventreply->IOCLogInfo));
6565 		}
6566 	}
6567 
6568 	/*
6569 	 * figure out what kind of event we got and handle accordingly
6570 	 */
6571 	switch (event) {
6572 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6573 	{
6574 		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
6575 		uint8_t				num_entries, expstatus, phy;
6576 		uint8_t				phystatus, physport, state, i;
6577 		uint8_t				start_phy_num, link_rate;
6578 		uint16_t			dev_handle, reason_code;
6579 		uint16_t			enc_handle, expd_handle;
6580 		char				string[80], curr[80], prev[80];
6581 		mptsas_topo_change_list_t	*topo_head = NULL;
6582 		mptsas_topo_change_list_t	*topo_tail = NULL;
6583 		mptsas_topo_change_list_t	*topo_node = NULL;
6584 		mptsas_target_t			*ptgt;
6585 		mptsas_smp_t			*psmp;
6586 		mptsas_hash_table_t		*tgttbl, *smptbl;
6587 		uint8_t				flags = 0, exp_flag;
6588 		smhba_info_t			*pSmhba = NULL;
6589 
6590 		NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6591 
6592 		tgttbl = &mpt->m_active->m_tgttbl;
6593 		smptbl = &mpt->m_active->m_smptbl;
6594 
6595 		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6596 		    eventreply->EventData;
6597 
6598 		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6599 		    &sas_topo_change_list->EnclosureHandle);
6600 		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6601 		    &sas_topo_change_list->ExpanderDevHandle);
6602 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6603 		    &sas_topo_change_list->NumEntries);
6604 		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6605 		    &sas_topo_change_list->StartPhyNum);
6606 		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6607 		    &sas_topo_change_list->ExpStatus);
6608 		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6609 		    &sas_topo_change_list->PhysicalPort);
6610 
6611 		string[0] = 0;
6612 		if (expd_handle) {
6613 			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6614 			switch (expstatus) {
6615 			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6616 				(void) sprintf(string, " added");
6617 				/*
6618 				 * New expander device added
6619 				 */
6620 				mpt->m_port_chng = 1;
6621 				topo_node = kmem_zalloc(
6622 				    sizeof (mptsas_topo_change_list_t),
6623 				    KM_SLEEP);
6624 				topo_node->mpt = mpt;
6625 				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6626 				topo_node->un.physport = physport;
6627 				topo_node->devhdl = expd_handle;
6628 				topo_node->flags = flags;
6629 				topo_node->object = NULL;
6630 				if (topo_head == NULL) {
6631 					topo_head = topo_tail = topo_node;
6632 				} else {
6633 					topo_tail->next = topo_node;
6634 					topo_tail = topo_node;
6635 				}
6636 				break;
6637 			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6638 				(void) sprintf(string, " not responding, "
6639 				    "removed");
6640 				psmp = mptsas_search_by_devhdl(smptbl,
6641 				    expd_handle);
6642 				if (psmp == NULL)
6643 					break;
6644 
6645 				topo_node = kmem_zalloc(
6646 				    sizeof (mptsas_topo_change_list_t),
6647 				    KM_SLEEP);
6648 				topo_node->mpt = mpt;
6649 				topo_node->un.phymask = psmp->m_phymask;
6650 				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6651 				topo_node->devhdl = expd_handle;
6652 				topo_node->flags = flags;
6653 				topo_node->object = NULL;
6654 				if (topo_head == NULL) {
6655 					topo_head = topo_tail = topo_node;
6656 				} else {
6657 					topo_tail->next = topo_node;
6658 					topo_tail = topo_node;
6659 				}
6660 				break;
6661 			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6662 				break;
6663 			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6664 				(void) sprintf(string, " not responding, "
6665 				    "delaying removal");
6666 				break;
6667 			default:
6668 				break;
6669 			}
6670 		} else {
6671 			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6672 		}
6673 
6674 		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6675 		    enc_handle, expd_handle, string));
6676 		for (i = 0; i < num_entries; i++) {
6677 			phy = i + start_phy_num;
6678 			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6679 			    &sas_topo_change_list->PHY[i].PhyStatus);
6680 			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6681 			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
6682 			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6683 			/*
6684 			 * Filter out processing of Phy Vacant Status unless
6685 			 * the reason code is "Not Responding".  Process all
6686 			 * other combinations of Phy Status and Reason Codes.
6687 			 */
6688 			if ((phystatus &
6689 			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6690 			    (reason_code !=
6691 			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6692 				continue;
6693 			}
6694 			curr[0] = 0;
6695 			prev[0] = 0;
6696 			string[0] = 0;
6697 			switch (reason_code) {
6698 			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6699 			{
6700 				NDBG20(("mptsas%d phy %d physical_port %d "
6701 				    "dev_handle %d added", mpt->m_instance, phy,
6702 				    physport, dev_handle));
6703 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6704 				    &sas_topo_change_list->PHY[i].LinkRate);
6705 				state = (link_rate &
6706 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6707 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6708 				switch (state) {
6709 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6710 					(void) sprintf(curr, "is disabled");
6711 					break;
6712 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6713 					(void) sprintf(curr, "is offline, "
6714 					    "failed speed negotiation");
6715 					break;
6716 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6717 					(void) sprintf(curr, "SATA OOB "
6718 					    "complete");
6719 					break;
6720 				case SMP_RESET_IN_PROGRESS:
6721 					(void) sprintf(curr, "SMP reset in "
6722 					    "progress");
6723 					break;
6724 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6725 					(void) sprintf(curr, "is online at "
6726 					    "1.5 Gbps");
6727 					break;
6728 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6729 					(void) sprintf(curr, "is online at 3.0 "
6730 					    "Gbps");
6731 					break;
6732 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6733 					(void) sprintf(curr, "is online at 6.0 "
6734 					    "Gbps");
6735 					break;
6736 				default:
6737 					(void) sprintf(curr, "state is "
6738 					    "unknown");
6739 					break;
6740 				}
6741 				/*
6742 				 * New target device added into the system.
6743 				 * Set the association flag according to
6744 				 * whether an expander is used or not.
6745 				 */
6746 				exp_flag =
6747 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6748 				if (flags ==
6749 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6750 					flags = exp_flag;
6751 				}
6752 				topo_node = kmem_zalloc(
6753 				    sizeof (mptsas_topo_change_list_t),
6754 				    KM_SLEEP);
6755 				topo_node->mpt = mpt;
6756 				topo_node->event =
6757 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
6758 				if (expd_handle == 0) {
6759 					/*
6760 					 * Per MPI 2, if expander dev handle
6761 					 * is 0, it's a directly attached
6762 					 * device, so the driver uses the PHY
6763 					 * to decide which iport is associated.
6764 					 */
6765 					physport = phy;
6766 					mpt->m_port_chng = 1;
6767 				}
6768 				topo_node->un.physport = physport;
6769 				topo_node->devhdl = dev_handle;
6770 				topo_node->flags = flags;
6771 				topo_node->object = NULL;
6772 				if (topo_head == NULL) {
6773 					topo_head = topo_tail = topo_node;
6774 				} else {
6775 					topo_tail->next = topo_node;
6776 					topo_tail = topo_node;
6777 				}
6778 				break;
6779 			}
6780 			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6781 			{
6782 				NDBG20(("mptsas%d phy %d physical_port %d "
6783 				    "dev_handle %d removed", mpt->m_instance,
6784 				    phy, physport, dev_handle));
6785 				/*
6786 				 * Set the association flag according to
6787 				 * whether an expander is used or not.
6788 				 */
6789 				exp_flag =
6790 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6791 				if (flags ==
6792 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6793 					flags = exp_flag;
6794 				}
6795 				/*
6796 				 * The target device has been removed from the
6797 				 * system before the device has actually been
6798 				 * taken offline.
6799 				 */
6800 				ptgt = mptsas_search_by_devhdl(tgttbl,
6801 				    dev_handle);
6802 				/*
6803 				 * If ptgt is NULL here, it means that the
6804 				 * DevHandle is not in the hash table.  This is
6805 				 * reasonable sometimes.  For example, if a
6806 				 * disk was pulled, then added, then pulled
6807 				 * again, the disk will not have been put into
6808 				 * the hash table because the add event will
6809 				 * have an invalid phymask.  BUT, this does not
6810 				 * mean that the DevHandle is invalid.  The
6811 				 * controller will still have a valid DevHandle
6812 				 * that must be removed.  To do this, use the
6813 				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6814 				 */
6815 				if (ptgt == NULL) {
6816 					topo_node = kmem_zalloc(
6817 					    sizeof (mptsas_topo_change_list_t),
6818 					    KM_SLEEP);
6819 					topo_node->mpt = mpt;
6820 					topo_node->un.phymask = 0;
6821 					topo_node->event =
6822 					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6823 					topo_node->devhdl = dev_handle;
6824 					topo_node->flags = flags;
6825 					topo_node->object = NULL;
6826 					if (topo_head == NULL) {
6827 						topo_head = topo_tail =
6828 						    topo_node;
6829 					} else {
6830 						topo_tail->next = topo_node;
6831 						topo_tail = topo_node;
6832 					}
6833 					break;
6834 				}
6835 
6836 				/*
6837 				 * Update the DR flag immediately to avoid I/O
6838 				 * failures before failover finishes.  Note the
6839 				 * mutex protection: m_tx_waitq_mutex must be
6840 				 * held while setting m_dr_flag because
6841 				 * subsequent commands are not added to the
6842 				 * waitq; instead, TRAN_BUSY is returned in the
6843 				 * tran_start context.
6844 				 */
6845 				mutex_enter(&mpt->m_tx_waitq_mutex);
6846 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6847 				mutex_exit(&mpt->m_tx_waitq_mutex);
6848 
6849 				topo_node = kmem_zalloc(
6850 				    sizeof (mptsas_topo_change_list_t),
6851 				    KM_SLEEP);
6852 				topo_node->mpt = mpt;
6853 				topo_node->un.phymask = ptgt->m_phymask;
6854 				topo_node->event =
6855 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
6856 				topo_node->devhdl = dev_handle;
6857 				topo_node->flags = flags;
6858 				topo_node->object = NULL;
6859 				if (topo_head == NULL) {
6860 					topo_head = topo_tail = topo_node;
6861 				} else {
6862 					topo_tail->next = topo_node;
6863 					topo_tail = topo_node;
6864 				}
6865 				break;
6866 			}
6867 			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6868 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6869 				    &sas_topo_change_list->PHY[i].LinkRate);
6870 				state = (link_rate &
6871 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6872 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6873 				pSmhba = &mpt->m_phy_info[i].smhba_info;
6874 				pSmhba->negotiated_link_rate = state;
6875 				switch (state) {
6876 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6877 					(void) sprintf(curr, "is disabled");
6878 					mptsas_smhba_log_sysevent(mpt,
6879 					    ESC_SAS_PHY_EVENT,
6880 					    SAS_PHY_REMOVE,
6881 					    &mpt->m_phy_info[i].smhba_info);
6882 					mpt->m_phy_info[i].smhba_info.
6883 					    negotiated_link_rate
6884 					    = 0x1;
6885 					break;
6886 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6887 					(void) sprintf(curr, "is offline, "
6888 					    "failed speed negotiation");
6889 					mptsas_smhba_log_sysevent(mpt,
6890 					    ESC_SAS_PHY_EVENT,
6891 					    SAS_PHY_OFFLINE,
6892 					    &mpt->m_phy_info[i].smhba_info);
6893 					break;
6894 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6895 					(void) sprintf(curr, "SATA OOB "
6896 					    "complete");
6897 					break;
6898 				case SMP_RESET_IN_PROGRESS:
6899 					(void) sprintf(curr, "SMP reset in "
6900 					    "progress");
6901 					break;
6902 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6903 					(void) sprintf(curr, "is online at "
6904 					    "1.5 Gbps");
6905 					if ((expd_handle == 0) &&
6906 					    (enc_handle == 1)) {
6907 						mpt->m_port_chng = 1;
6908 					}
6909 					mptsas_smhba_log_sysevent(mpt,
6910 					    ESC_SAS_PHY_EVENT,
6911 					    SAS_PHY_ONLINE,
6912 					    &mpt->m_phy_info[i].smhba_info);
6913 					break;
6914 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6915 					(void) sprintf(curr, "is online at 3.0 "
6916 					    "Gbps");
6917 					if ((expd_handle == 0) &&
6918 					    (enc_handle == 1)) {
6919 						mpt->m_port_chng = 1;
6920 					}
6921 					mptsas_smhba_log_sysevent(mpt,
6922 					    ESC_SAS_PHY_EVENT,
6923 					    SAS_PHY_ONLINE,
6924 					    &mpt->m_phy_info[i].smhba_info);
6925 					break;
6926 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6927 					(void) sprintf(curr, "is online at "
6928 					    "6.0 Gbps");
6929 					if ((expd_handle == 0) &&
6930 					    (enc_handle == 1)) {
6931 						mpt->m_port_chng = 1;
6932 					}
6933 					mptsas_smhba_log_sysevent(mpt,
6934 					    ESC_SAS_PHY_EVENT,
6935 					    SAS_PHY_ONLINE,
6936 					    &mpt->m_phy_info[i].smhba_info);
6937 					break;
6938 				default:
6939 					(void) sprintf(curr, "state is "
6940 					    "unknown");
6941 					break;
6942 				}
6943 
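				/*
				 * Decode the previous link rate so the
				 * debug message below can report both
				 * transitions.
				 */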
6944 				state = (link_rate &
6945 				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6946 				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6947 				switch (state) {
6948 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6949 					(void) sprintf(prev, ", was disabled");
6950 					break;
6951 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6952 					(void) sprintf(prev, ", was offline, "
6953 					    "failed speed negotiation");
6954 					break;
6955 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6956 					(void) sprintf(prev, ", was SATA OOB "
6957 					    "complete");
6958 					break;
6959 				case SMP_RESET_IN_PROGRESS:
6960 					(void) sprintf(prev, ", was SMP reset "
6961 					    "in progress");
6962 					break;
6963 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6964 					(void) sprintf(prev, ", was online at "
6965 					    "1.5 Gbps");
6966 					break;
6967 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6968 					(void) sprintf(prev, ", was online at "
6969 					    "3.0 Gbps");
6970 					break;
6971 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6972 					(void) sprintf(prev, ", was online at "
6973 					    "6.0 Gbps");
6974 					break;
6975 				default:
6976 					break;
6977 				}
6978 				(void) sprintf(&string[strlen(string)], "link "
6979 				    "changed, ");
6980 				break;
6981 			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6982 				continue;
6983 			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6984 				(void) sprintf(&string[strlen(string)],
6985 				    "target not responding, delaying "
6986 				    "removal");
6987 				break;
6988 			}
6989 			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6990 			    mpt->m_instance, phy, dev_handle, string, curr,
6991 			    prev));
6992 		}
6993 		if (topo_head != NULL) {
6994 			/*
6995 			 * Launch DR taskq to handle topology change
6996 			 */
6997 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6998 			    mptsas_handle_dr, (void *)topo_head,
6999 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7000 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7001 				    "for handle SAS DR event failed. \n");
7002 			}
7003 		}
7004 		break;
7005 	}
7006 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7007 	{
7008 		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
7009 		mptsas_topo_change_list_t		*topo_head = NULL;
7010 		mptsas_topo_change_list_t		*topo_tail = NULL;
7011 		mptsas_topo_change_list_t		*topo_node = NULL;
7012 		mptsas_target_t				*ptgt;
7013 		mptsas_hash_table_t			*tgttbl;
7014 		uint8_t					num_entries, i, reason;
7015 		uint16_t				volhandle, diskhandle;
7016 
7017 		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7018 		    eventreply->EventData;
7019 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7020 		    &irChangeList->NumElements);
7021 
7022 		tgttbl = &mpt->m_active->m_tgttbl;
7023 
7024 		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7025 		    mpt->m_instance));
7026 
7027 		for (i = 0; i < num_entries; i++) {
7028 			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7029 			    &irChangeList->ConfigElement[i].ReasonCode);
7030 			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7031 			    &irChangeList->ConfigElement[i].VolDevHandle);
7032 			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7033 			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7034 
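			/*
			 * Queue a topology change node for each element and
			 * let the DR taskq do the actual online/offline work.
			 */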
7035 			switch (reason) {
7036 			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7037 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7038 			{
7039 				NDBG20(("mptsas %d volume added\n",
7040 				    mpt->m_instance));
7041 
7042 				topo_node = kmem_zalloc(
7043 				    sizeof (mptsas_topo_change_list_t),
7044 				    KM_SLEEP);
7045 
7046 				topo_node->mpt = mpt;
7047 				topo_node->event =
7048 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7049 				topo_node->un.physport = 0xff;
7050 				topo_node->devhdl = volhandle;
7051 				topo_node->flags =
7052 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7053 				topo_node->object = NULL;
7054 				if (topo_head == NULL) {
7055 					topo_head = topo_tail = topo_node;
7056 				} else {
7057 					topo_tail->next = topo_node;
7058 					topo_tail = topo_node;
7059 				}
7060 				break;
7061 			}
7062 			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7063 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7064 			{
7065 				NDBG20(("mptsas %d volume deleted\n",
7066 				    mpt->m_instance));
7067 				ptgt = mptsas_search_by_devhdl(tgttbl,
7068 				    volhandle);
7069 				if (ptgt == NULL)
7070 					break;
7071 
7072 				/*
7073 				 * Clear any flags related to volume
7074 				 */
7075 				(void) mptsas_delete_volume(mpt, volhandle);
7076 
7077 				/*
7078 				 * Update DR flag now to avoid I/O failure.
7079 				 */
7080 				mutex_enter(&mpt->m_tx_waitq_mutex);
7081 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7082 				mutex_exit(&mpt->m_tx_waitq_mutex);
7083 
7084 				topo_node = kmem_zalloc(
7085 				    sizeof (mptsas_topo_change_list_t),
7086 				    KM_SLEEP);
7087 				topo_node->mpt = mpt;
7088 				topo_node->un.phymask = ptgt->m_phymask;
7089 				topo_node->event =
7090 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7091 				topo_node->devhdl = volhandle;
7092 				topo_node->flags =
7093 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7094 				topo_node->object = (void *)ptgt;
7095 				if (topo_head == NULL) {
7096 					topo_head = topo_tail = topo_node;
7097 				} else {
7098 					topo_tail->next = topo_node;
7099 					topo_tail = topo_node;
7100 				}
7101 				break;
7102 			}
7103 			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7104 			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7105 			{
7106 				ptgt = mptsas_search_by_devhdl(tgttbl,
7107 				    diskhandle);
7108 				if (ptgt == NULL)
7109 					break;
7110 
7111 				/*
7112 				 * Update DR flag now to avoid I/O failure.
7113 				 */
7114 				mutex_enter(&mpt->m_tx_waitq_mutex);
7115 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7116 				mutex_exit(&mpt->m_tx_waitq_mutex);
7117 
7118 				topo_node = kmem_zalloc(
7119 				    sizeof (mptsas_topo_change_list_t),
7120 				    KM_SLEEP);
7121 				topo_node->mpt = mpt;
7122 				topo_node->un.phymask = ptgt->m_phymask;
7123 				topo_node->event =
7124 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7125 				topo_node->devhdl = diskhandle;
7126 				topo_node->flags =
7127 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7128 				topo_node->object = (void *)ptgt;
7129 				if (topo_head == NULL) {
7130 					topo_head = topo_tail = topo_node;
7131 				} else {
7132 					topo_tail->next = topo_node;
7133 					topo_tail = topo_node;
7134 				}
7135 				break;
7136 			}
7137 			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7138 			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7139 			{
7140 				/*
7141 				 * The physical drive is released by an IR
7142 				 * volume.  But we cannot get the physport
7143 				 * or phynum from the event data, so we can
7144 				 * only get the physport/phynum from a SAS
7145 				 * Device Page0 request for the devhdl.
7146 				 */
7147 				topo_node = kmem_zalloc(
7148 				    sizeof (mptsas_topo_change_list_t),
7149 				    KM_SLEEP);
7150 				topo_node->mpt = mpt;
7151 				topo_node->un.phymask = 0;
7152 				topo_node->event =
7153 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7154 				topo_node->devhdl = diskhandle;
7155 				topo_node->flags =
7156 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7157 				topo_node->object = NULL;
7158 				mpt->m_port_chng = 1;
7159 				if (topo_head == NULL) {
7160 					topo_head = topo_tail = topo_node;
7161 				} else {
7162 					topo_tail->next = topo_node;
7163 					topo_tail = topo_node;
7164 				}
7165 				break;
7166 			}
7167 			default:
7168 				break;
7169 			}
7170 		}
7171 
7172 		if (topo_head != NULL) {
7173 			/*
7174 			 * Launch DR taskq to handle topology change
7175 			 */
7176 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7177 			    mptsas_handle_dr, (void *)topo_head,
7178 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7179 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7180 				    "for handle SAS DR event failed. \n");
7181 			}
7182 		}
7183 		break;
7184 	}
7185 	default:
7186 		return (DDI_FAILURE);
7187 	}
7188 
7189 	return (DDI_SUCCESS);
7190 }
7191 
7192 /*
7193  * handle events from ioc
7194  */
7195 static void
7196 mptsas_handle_event(void *args)
7197 {
7198 	m_replyh_arg_t			*replyh_arg;
7199 	pMpi2EventNotificationReply_t	eventreply;
7200 	uint32_t			event, iocloginfo, rfm;
7201 	uint32_t			status;
7202 	uint8_t				port;
7203 	mptsas_t			*mpt;
7204 	uint_t				iocstatus;
7205 
7206 	replyh_arg = (m_replyh_arg_t *)args;
7207 	rfm = replyh_arg->rfm;
7208 	mpt = replyh_arg->mpt;
7209 
7210 	mutex_enter(&mpt->m_mutex);
7211 
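	/*
	 * Convert the reply frame DMA address (rfm) back into a pointer
	 * within the reply frame region before reading the event data.
	 */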
7212 	eventreply = (pMpi2EventNotificationReply_t)
7213 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7214 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7215 
7216 	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7217 	    &eventreply->IOCStatus)) {
7218 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7219 			mptsas_log(mpt, CE_WARN,
7220 			    "!mptsas_handle_event: IOCStatus=0x%x, "
7221 			    "IOCLogInfo=0x%x", iocstatus,
7222 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7223 			    &eventreply->IOCLogInfo));
7224 		} else {
7225 			mptsas_log(mpt, CE_WARN,
7226 			    "mptsas_handle_event: IOCStatus=0x%x, "
7227 			    "IOCLogInfo=0x%x", iocstatus,
7228 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7229 			    &eventreply->IOCLogInfo));
7230 		}
7231 	}
7232 
7233 	/*
7234 	 * figure out what kind of event we got and handle accordingly
7235 	 */
7236 	switch (event) {
7237 	case MPI2_EVENT_LOG_ENTRY_ADDED:
7238 		break;
7239 	case MPI2_EVENT_LOG_DATA:
7240 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7241 		    &eventreply->IOCLogInfo);
7242 		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7243 		    iocloginfo));
7244 		break;
7245 	case MPI2_EVENT_STATE_CHANGE:
7246 		NDBG20(("mptsas%d state change.", mpt->m_instance));
7247 		break;
7248 	case MPI2_EVENT_HARD_RESET_RECEIVED:
7249 		NDBG20(("mptsas%d event change.", mpt->m_instance));
7250 		break;
7251 	case MPI2_EVENT_SAS_DISCOVERY:
7252 	{
7253 		MPI2_EVENT_DATA_SAS_DISCOVERY	*sasdiscovery;
7254 		char				string[80];
7255 		uint8_t				rc;
7256 
7257 		sasdiscovery =
7258 		    (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7259 
7260 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7261 		    &sasdiscovery->ReasonCode);
7262 		port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7263 		    &sasdiscovery->PhysicalPort);
7264 		status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7265 		    &sasdiscovery->DiscoveryStatus);
7266 
7267 		string[0] = 0;
7268 		switch (rc) {
7269 		case MPI2_EVENT_SAS_DISC_RC_STARTED:
7270 			(void) sprintf(string, "STARTING");
7271 			break;
7272 		case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7273 			(void) sprintf(string, "COMPLETED");
7274 			break;
7275 		default:
7276 			(void) sprintf(string, "UNKNOWN");
7277 			break;
7278 		}
7279 
7280 		NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7281 		    port, status));
7282 
7283 		break;
7284 	}
7285 	case MPI2_EVENT_EVENT_CHANGE:
7286 		NDBG20(("mptsas%d event change.", mpt->m_instance));
7287 		break;
7288 	case MPI2_EVENT_TASK_SET_FULL:
7289 	{
7290 		pMpi2EventDataTaskSetFull_t	taskfull;
7291 
7292 		taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7293 
7294 		NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7295 		    mpt->m_instance,  ddi_get16(mpt->m_acc_reply_frame_hdl,
7296 		    &taskfull->CurrentDepth)));
7297 		break;
7298 	}
7299 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7300 	{
7301 		/*
7302 		 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7303 		 * in mptsas_handle_event_sync() in interrupt context
7304 		 */
7305 		break;
7306 	}
7307 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7308 	{
7309 		pMpi2EventDataSasEnclDevStatusChange_t	encstatus;
7310 		uint8_t					rc;
7311 		char					string[80] = {0};
7312 
7313 		encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7314 		    eventreply->EventData;
7315 
7316 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7317 		    &encstatus->ReasonCode);
7318 		switch (rc) {
7319 		case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7320 			(void) sprintf(string, "added");
7321 			break;
7322 		case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7323 			(void) sprintf(string, ", not responding");
7324 			break;
7325 		default:
7326 			break;
7327 		}
7328 		NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7329 		    mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7330 		    &encstatus->EnclosureHandle), string));
7331 		break;
7332 	}
7333 
7334 	/*
7335 	 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7336 	 * mptsas_handle_event_sync(); here we just send the ack message.
7337 	 */
7338 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7339 	{
7340 		pMpi2EventDataSasDeviceStatusChange_t	statuschange;
7341 		uint8_t					rc;
7342 		uint16_t				devhdl;
7343 		uint64_t				wwn = 0;
7344 		uint32_t				wwn_lo, wwn_hi;
7345 
7346 		statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7347 		    eventreply->EventData;
7348 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7349 		    &statuschange->ReasonCode);
7350 		wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7351 		    (uint32_t *)(void *)&statuschange->SASAddress);
7352 		wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7353 		    (uint32_t *)(void *)&statuschange->SASAddress + 1);
7354 		wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7355 		devhdl =  ddi_get16(mpt->m_acc_reply_frame_hdl,
7356 		    &statuschange->DevHandle);
7357 
7358 		NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7359 		    wwn));
7360 
7361 		switch (rc) {
7362 		case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7363 			NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7364 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7365 			    &statuschange->ASC),
7366 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7367 			    &statuschange->ASCQ)));
7368 			break;
7369 
7370 		case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7371 			NDBG20(("Device not supported"));
7372 			break;
7373 
7374 		case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7375 			NDBG20(("IOC internally generated the Target Reset "
7376 			    "for devhdl:%x", devhdl));
7377 			break;
7378 
7379 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7380 			NDBG20(("IOC's internally generated Target Reset "
7381 			    "completed for devhdl:%x", devhdl));
7382 			break;
7383 
7384 		case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7385 			NDBG20(("IOC internally generated Abort Task"));
7386 			break;
7387 
7388 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7389 			NDBG20(("IOC's internally generated Abort Task "
7390 			    "completed"));
7391 			break;
7392 
7393 		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7394 			NDBG20(("IOC internally generated Abort Task Set"));
7395 			break;
7396 
7397 		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7398 			NDBG20(("IOC internally generated Clear Task Set"));
7399 			break;
7400 
7401 		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7402 			NDBG20(("IOC internally generated Query Task"));
7403 			break;
7404 
7405 		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7406 			NDBG20(("Device sent an Asynchronous Notification"));
7407 			break;
7408 
7409 		default:
7410 			break;
7411 		}
7412 		break;
7413 	}
7414 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7415 	{
7416 		/*
7417 		 * IR TOPOLOGY CHANGE LIST Event has already been handled
7418 		 * in mptsas_handle_event_sync() in interrupt context
7419 		 */
7420 		break;
7421 	}
7422 	case MPI2_EVENT_IR_OPERATION_STATUS:
7423 	{
7424 		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
7425 		char					reason_str[80];
7426 		uint8_t					rc, percent;
7427 		uint16_t				handle;
7428 
7429 		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7430 		    eventreply->EventData;
7431 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7432 		    &irOpStatus->RAIDOperation);
7433 		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7434 		    &irOpStatus->PercentComplete);
7435 		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7436 		    &irOpStatus->VolDevHandle);
7437 
7438 		switch (rc) {
7439 		case MPI2_EVENT_IR_RAIDOP_RESYNC:
7440 			(void) sprintf(reason_str, "resync");
7441 			break;
7442 		case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7443 			(void) sprintf(reason_str, "online capacity "
7444 			    "expansion");
7445 			break;
7446 		case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7447 			(void) sprintf(reason_str, "consistency check");
7448 			break;
7449 		default:
7450 			(void) sprintf(reason_str, "unknown reason %x",
7451 			    rc);
7452 		}
7453 
7454 		NDBG20(("mptsas%d raid operational status: (%s)"
7455 		    "\thandle(0x%04x), percent complete(%d)\n",
7456 		    mpt->m_instance, reason_str, handle, percent));
7457 		break;
7458 	}
7459 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7460 	{
7461 		pMpi2EventDataSasBroadcastPrimitive_t	sas_broadcast;
7462 		uint8_t					phy_num;
7463 		uint8_t					primitive;
7464 
7465 		sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7466 		    eventreply->EventData;
7467 
7468 		phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7469 		    &sas_broadcast->PhyNum);
7470 		primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7471 		    &sas_broadcast->Primitive);
7472 
7473 		switch (primitive) {
7474 		case MPI2_EVENT_PRIMITIVE_CHANGE:
7475 			mptsas_smhba_log_sysevent(mpt,
7476 			    ESC_SAS_HBA_PORT_BROADCAST,
7477 			    SAS_PORT_BROADCAST_CHANGE,
7478 			    &mpt->m_phy_info[phy_num].smhba_info);
7479 			break;
7480 		case MPI2_EVENT_PRIMITIVE_SES:
7481 			mptsas_smhba_log_sysevent(mpt,
7482 			    ESC_SAS_HBA_PORT_BROADCAST,
7483 			    SAS_PORT_BROADCAST_SES,
7484 			    &mpt->m_phy_info[phy_num].smhba_info);
7485 			break;
7486 		case MPI2_EVENT_PRIMITIVE_EXPANDER:
7487 			mptsas_smhba_log_sysevent(mpt,
7488 			    ESC_SAS_HBA_PORT_BROADCAST,
7489 			    SAS_PORT_BROADCAST_D01_4,
7490 			    &mpt->m_phy_info[phy_num].smhba_info);
7491 			break;
7492 		case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7493 			mptsas_smhba_log_sysevent(mpt,
7494 			    ESC_SAS_HBA_PORT_BROADCAST,
7495 			    SAS_PORT_BROADCAST_D04_7,
7496 			    &mpt->m_phy_info[phy_num].smhba_info);
7497 			break;
7498 		case MPI2_EVENT_PRIMITIVE_RESERVED3:
7499 			mptsas_smhba_log_sysevent(mpt,
7500 			    ESC_SAS_HBA_PORT_BROADCAST,
7501 			    SAS_PORT_BROADCAST_D16_7,
7502 			    &mpt->m_phy_info[phy_num].smhba_info);
7503 			break;
7504 		case MPI2_EVENT_PRIMITIVE_RESERVED4:
7505 			mptsas_smhba_log_sysevent(mpt,
7506 			    ESC_SAS_HBA_PORT_BROADCAST,
7507 			    SAS_PORT_BROADCAST_D29_7,
7508 			    &mpt->m_phy_info[phy_num].smhba_info);
7509 			break;
7510 		case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7511 			mptsas_smhba_log_sysevent(mpt,
7512 			    ESC_SAS_HBA_PORT_BROADCAST,
7513 			    SAS_PORT_BROADCAST_D24_0,
7514 			    &mpt->m_phy_info[phy_num].smhba_info);
7515 			break;
7516 		case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7517 			mptsas_smhba_log_sysevent(mpt,
7518 			    ESC_SAS_HBA_PORT_BROADCAST,
7519 			    SAS_PORT_BROADCAST_D27_4,
7520 			    &mpt->m_phy_info[phy_num].smhba_info);
7521 			break;
7522 		default:
7523 			NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7524 			    " %x received",
7525 			    mpt->m_instance, primitive));
7526 			break;
7527 		}
7528 		NDBG20(("mptsas%d sas broadcast primitive: "
7529 		    "\tprimitive(0x%04x), phy(%d) complete\n",
7530 		    mpt->m_instance, primitive, phy_num));
7531 		break;
7532 	}
7533 	case MPI2_EVENT_IR_VOLUME:
7534 	{
7535 		Mpi2EventDataIrVolume_t		*irVolume;
7536 		uint16_t			devhandle;
7537 		uint32_t			state;
7538 		int				config, vol;
7539 		mptsas_slots_t			*slots = mpt->m_active;
7540 		uint8_t				found = FALSE;
7541 
7542 		irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7543 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7544 		    &irVolume->NewValue);
7545 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7546 		    &irVolume->VolDevHandle);
7547 
7548 		NDBG20(("EVENT_IR_VOLUME event is received"));
7549 
7550 		/*
7551 		 * Get latest RAID info and then find the DevHandle for this
7552 		 * event in the configuration.  If the DevHandle is not found
7553 		 * just exit the event.
7554 		 */
7555 		(void) mptsas_get_raid_info(mpt);
7556 		for (config = 0; (config < slots->m_num_raid_configs) &&
7557 		    (!found); config++) {
7558 			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7559 				if (slots->m_raidconfig[config].m_raidvol[vol].
7560 				    m_raidhandle == devhandle) {
7561 					found = TRUE;
7562 					break;
7563 				}
7564 			}
7565 		}
7566 		if (!found) {
7567 			break;
7568 		}
7569 
7570 		switch (irVolume->ReasonCode) {
7571 		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7572 		{
7573 			uint32_t i;
7574 			slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7575 			    state;
7576 
7577 			i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7578 			mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7579 			    ", auto-config of hot-swap drives is %s"
7580 			    ", write caching is %s"
7581 			    ", hot-spare pool mask is %02x\n",
7582 			    vol, state &
7583 			    MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7584 			    ? "disabled" : "enabled",
7585 			    i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7586 			    ? "controlled by member disks" :
7587 			    i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7588 			    ? "disabled" :
7589 			    i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7590 			    ? "enabled" :
7591 			    "incorrectly set",
7592 			    (state >> 16) & 0xff);
7593 			break;
7594 		}
7595 		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7596 		{
7597 			slots->m_raidconfig[config].m_raidvol[vol].m_state =
7598 			    (uint8_t)state;
7599 
7600 			mptsas_log(mpt, CE_NOTE,
7601 			    "Volume %d is now %s\n", vol,
7602 			    state == MPI2_RAID_VOL_STATE_OPTIMAL
7603 			    ? "optimal" :
7604 			    state == MPI2_RAID_VOL_STATE_DEGRADED
7605 			    ? "degraded" :
7606 			    state == MPI2_RAID_VOL_STATE_ONLINE
7607 			    ? "online" :
7608 			    state == MPI2_RAID_VOL_STATE_INITIALIZING
7609 			    ? "initializing" :
7610 			    state == MPI2_RAID_VOL_STATE_FAILED
7611 			    ? "failed" :
7612 			    state == MPI2_RAID_VOL_STATE_MISSING
7613 			    ? "missing" :
7614 			    "state unknown");
7615 			break;
7616 		}
7617 		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7618 		{
7619 			slots->m_raidconfig[config].m_raidvol[vol].
7620 			    m_statusflags = state;
7621 
7622 			mptsas_log(mpt, CE_NOTE,
7623 			    " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7624 			    vol,
7625 			    state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7626 			    ? ", enabled" : ", disabled",
7627 			    state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7628 			    ? ", quiesced" : "",
7629 			    state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7630 			    ? ", inactive" : ", active",
7631 			    state &
7632 			    MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7633 			    ? ", bad block table is full" : "",
7634 			    state &
7635 			    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7636 			    ? ", resync in progress" : "",
7637 			    state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7638 			    ? ", background initialization in progress" : "",
7639 			    state &
7640 			    MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7641 			    ? ", capacity expansion in progress" : "",
7642 			    state &
7643 			    MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7644 			    ? ", consistency check in progress" : "",
7645 			    state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7646 			    ? ", data scrub in progress" : "");
7647 			break;
7648 		}
7649 		default:
7650 			break;
7651 		}
7652 		break;
7653 	}
7654 	case MPI2_EVENT_IR_PHYSICAL_DISK:
7655 	{
7656 		Mpi2EventDataIrPhysicalDisk_t	*irPhysDisk;
7657 		uint16_t			devhandle, enchandle, slot;
7658 		uint32_t			status, state;
7659 		uint8_t				physdisknum, reason;
7660 
7661 		irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7662 		    eventreply->EventData;
7663 		physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7664 		    &irPhysDisk->PhysDiskNum);
7665 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7666 		    &irPhysDisk->PhysDiskDevHandle);
7667 		enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7668 		    &irPhysDisk->EnclosureHandle);
7669 		slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7670 		    &irPhysDisk->Slot);
7671 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7672 		    &irPhysDisk->NewValue);
7673 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7674 		    &irPhysDisk->ReasonCode);
7675 
7676 		NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7677 
7678 		switch (reason) {
7679 		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7680 			mptsas_log(mpt, CE_NOTE,
7681 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7682 			    "for enclosure with handle 0x%x is now in hot "
7683 			    "spare pool %d",
7684 			    physdisknum, devhandle, slot, enchandle,
7685 			    (state >> 16) & 0xff);
7686 			break;
7687 
7688 		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7689 			status = state;
7690 			mptsas_log(mpt, CE_NOTE,
7691 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7692 			    "for enclosure with handle 0x%x is now "
7693 			    "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7694 			    enchandle,
7695 			    status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7696 			    ? ", inactive" : ", active",
7697 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7698 			    ? ", out of sync" : "",
7699 			    status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7700 			    ? ", quiesced" : "",
7701 			    status &
7702 			    MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7703 			    ? ", write cache enabled" : "",
7704 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7705 			    ? ", capacity expansion target" : "");
7706 			break;
7707 
7708 		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7709 			mptsas_log(mpt, CE_NOTE,
7710 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7711 			    "for enclosure with handle 0x%x is now %s\n",
7712 			    physdisknum, devhandle, slot, enchandle,
7713 			    state == MPI2_RAID_PD_STATE_OPTIMAL
7714 			    ? "optimal" :
7715 			    state == MPI2_RAID_PD_STATE_REBUILDING
7716 			    ? "rebuilding" :
7717 			    state == MPI2_RAID_PD_STATE_DEGRADED
7718 			    ? "degraded" :
7719 			    state == MPI2_RAID_PD_STATE_HOT_SPARE
7720 			    ? "a hot spare" :
7721 			    state == MPI2_RAID_PD_STATE_ONLINE
7722 			    ? "online" :
7723 			    state == MPI2_RAID_PD_STATE_OFFLINE
7724 			    ? "offline" :
7725 			    state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7726 			    ? "not compatible" :
7727 			    state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7728 			    ? "not configured" :
7729 			    "state unknown");
7730 			break;
7731 		}
7732 		break;
7733 	}
7734 	default:
7735 		NDBG20(("mptsas%d: unknown event %x received",
7736 		    mpt->m_instance, event));
7737 		break;
7738 	}
7739 
7740 	/*
7741 	 * Return the reply frame to the free queue.
7742 	 */
7743 	ddi_put32(mpt->m_acc_free_queue_hdl,
7744 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7745 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7746 	    DDI_DMA_SYNC_FORDEV);
7747 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7748 		mpt->m_free_index = 0;
7749 	}
7750 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7751 	    mpt->m_free_index);
7752 	mutex_exit(&mpt->m_mutex);
7753 }
7754 
7755 /*
7756  * invoked from timeout() to restart qfull cmds with throttle == 0
7757  */
7758 static void
7759 mptsas_restart_cmd(void *arg)
7760 {
7761 	mptsas_t	*mpt = arg;
7762 	mptsas_target_t	*ptgt = NULL;
7763 
7764 	mutex_enter(&mpt->m_mutex);
7765 
7766 	mpt->m_restart_cmd_timeid = 0;
7767 
7768 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7769 	    MPTSAS_HASH_FIRST);
7770 	while (ptgt != NULL) {
7771 		if (ptgt->m_reset_delay == 0) {
7772 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7773 				mptsas_set_throttle(mpt, ptgt,
7774 				    MAX_THROTTLE);
7775 			}
7776 		}
7777 
7778 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7779 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7780 	}
7781 	mptsas_restart_hba(mpt);
7782 	mutex_exit(&mpt->m_mutex);
7783 }
7784 
7785 void
7786 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7787 {
7788 	int		slot;
7789 	mptsas_slots_t	*slots = mpt->m_active;
7790 	int		t;
7791 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
7792 
7793 	ASSERT(cmd != NULL);
7794 	ASSERT(cmd->cmd_queued == FALSE);
7795 
7796 	/*
7797 	 * Task Management cmds are removed in their own routines.  Also,
7798 	 * we don't want to modify timeout based on TM cmds.
7799 	 */
7800 	if (cmd->cmd_flags & CFLAG_TM_CMD) {
7801 		return;
7802 	}
7803 
7804 	t = Tgt(cmd);
7805 	slot = cmd->cmd_slot;
7806 
7807 	/*
7808 	 * remove the cmd.
7809 	 */
7810 	if (cmd == slots->m_slot[slot]) {
7811 		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7812 		slots->m_slot[slot] = NULL;
7813 		mpt->m_ncmds--;
7814 
7815 		/*
7816 		 * only decrement per target ncmds if command
7817 		 * has a target associated with it.
7818 		 */
7819 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7820 			ptgt->m_t_ncmds--;
7821 			/*
7822 			 * reset throttle if we just ran an untagged command
7823 			 * to a tagged target
7824 			 */
7825 			if ((ptgt->m_t_ncmds == 0) &&
7826 			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7827 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7828 			}
7829 		}
7830 
7831 	}
7832 
7833 	/*
7834 	 * This is all we need to do for ioc commands.
7835 	 */
7836 	if (cmd->cmd_flags & CFLAG_CMDIOC) {
7837 		mptsas_return_to_pool(mpt, cmd);
7838 		return;
7839 	}
7840 
7841 	/*
7842 	 * Figure out what to set tag Q timeout for...
7843 	 *
7844 	 * Optimize: If we have duplicates of the same timeout
7845 	 * we're using, then we'll use it again until we run
7846 	 * out of duplicates.  This should be the normal case
7847 	 * for block and raw I/O.
7848 	 * If there are no duplicates, we have to scan the tag queue
7849 	 * and find the longest timeout value and use it.  This is
7850 	 * going to take a while...
7851 	 * Add 1 to m_n_slots to account for TM request.
7852 	 */
7853 	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7854 		if (--(ptgt->m_dups) == 0) {
7855 			if (ptgt->m_t_ncmds) {
7856 				mptsas_cmd_t *ssp;
7857 				uint_t n = 0;
7858 				ushort_t nslots = (slots->m_n_slots + 1);
7859 				ushort_t i;
7860 				/*
7861 				 * This crude check assumes we don't do
7862 				 * this too often which seems reasonable
7863 				 * for block and raw I/O.
7864 				 */
7865 				for (i = 0; i < nslots; i++) {
7866 					ssp = slots->m_slot[i];
7867 					if (ssp && (Tgt(ssp) == t) &&
7868 					    (ssp->cmd_pkt->pkt_time > n)) {
7869 						n = ssp->cmd_pkt->pkt_time;
7870 						ptgt->m_dups = 1;
7871 					} else if (ssp && (Tgt(ssp) == t) &&
7872 					    (ssp->cmd_pkt->pkt_time == n)) {
7873 						ptgt->m_dups++;
7874 					}
7875 				}
7876 				ptgt->m_timebase = n;
7877 			} else {
7878 				ptgt->m_dups = 0;
7879 				ptgt->m_timebase = 0;
7880 			}
7881 		}
7882 	}
7883 	ptgt->m_timeout = ptgt->m_timebase;
7884 
7885 	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7886 }
7887 
7888 /*
7889  * Accept all cmds on the tx_waitq, if any, and then
7890  * start a fresh request from the top of the device queue.
7891  *
7892  * Since cmds are usually queued on the tx_waitq and only rarely on
7893  * the instance waitq, this function should not be invoked from the ISR;
7894  * mptsas_restart_waitq() is invoked from the ISR instead.  Otherwise the
7895  * load belonging to the I/O dispatch CPUs would move to the interrupt CPU.
7896  */
7897 static void
7898 mptsas_restart_hba(mptsas_t *mpt)
7899 {
7900 	ASSERT(mutex_owned(&mpt->m_mutex));
7901 
7902 	mutex_enter(&mpt->m_tx_waitq_mutex);
7903 	if (mpt->m_tx_waitq) {
7904 		mptsas_accept_tx_waitq(mpt);
7905 	}
7906 	mutex_exit(&mpt->m_tx_waitq_mutex);
7907 	mptsas_restart_waitq(mpt);
7908 }
7909 
7910 /*
7911  * start a fresh request from the top of the device queue
7912  */
7913 static void
7914 mptsas_restart_waitq(mptsas_t *mpt)
7915 {
7916 	mptsas_cmd_t	*cmd, *next_cmd;
7917 	mptsas_target_t *ptgt = NULL;
7918 
7919 	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7920 
7921 	ASSERT(mutex_owned(&mpt->m_mutex));
7922 
7923 	/*
7924 	 * If there is a reset delay, don't start any cmds.  Otherwise, start
7925 	 * as many cmds as possible.
7926 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7927 	 * commands is m_max_requests - 2.
7928 	 */
7929 	cmd = mpt->m_waitq;
7930 
7931 	while (cmd != NULL) {
7932 		next_cmd = cmd->cmd_linkp;
7933 		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7934 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7935 				/*
7936 				 * A passthru command that got a slot
7937 				 * needs CFLAG_PREPARED set.
7938 				 */
7939 				cmd->cmd_flags |= CFLAG_PREPARED;
7940 				mptsas_waitq_delete(mpt, cmd);
7941 				mptsas_start_passthru(mpt, cmd);
7942 			}
7943 			cmd = next_cmd;
7944 			continue;
7945 		}
7946 		if (cmd->cmd_flags & CFLAG_CONFIG) {
7947 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7948 				/*
7949 				 * Send the config page request and delete it
7950 				 * from the waitq.
7951 				 */
7952 				cmd->cmd_flags |= CFLAG_PREPARED;
7953 				mptsas_waitq_delete(mpt, cmd);
7954 				mptsas_start_config_page_access(mpt, cmd);
7955 			}
7956 			cmd = next_cmd;
7957 			continue;
7958 		}
7959 		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7960 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7961 				/*
7962 				 * Send the FW Diag request and delete it from
7963 				 * the waitq.
7964 				 */
7965 				cmd->cmd_flags |= CFLAG_PREPARED;
7966 				mptsas_waitq_delete(mpt, cmd);
7967 				mptsas_start_diag(mpt, cmd);
7968 			}
7969 			cmd = next_cmd;
7970 			continue;
7971 		}
7972 
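		/*
		 * Normal SCSI command: start it only if the HBA has a free
		 * slot and the target is not in reset delay or throttled.
		 */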
7973 		ptgt = cmd->cmd_tgt_addr;
7974 		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7975 		    (ptgt->m_t_ncmds == 0)) {
7976 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7977 		}
7978 		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7979 		    (ptgt && (ptgt->m_reset_delay == 0)) &&
7980 		    (ptgt && (ptgt->m_t_ncmds <
7981 		    ptgt->m_t_throttle))) {
7982 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7983 				mptsas_waitq_delete(mpt, cmd);
7984 				(void) mptsas_start_cmd(mpt, cmd);
7985 			}
7986 		}
7987 		cmd = next_cmd;
7988 	}
7989 }
7990 /*
7991  * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
7992  * Accept all those queued cmds before a new cmd is accepted so that the
7993  * cmds are sent in order.
7994  */
7995 static void
7996 mptsas_accept_tx_waitq(mptsas_t *mpt)
7997 {
7998 	mptsas_cmd_t *cmd;
7999 
8000 	ASSERT(mutex_owned(&mpt->m_mutex));
8001 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8002 
8003 	/*
8004 	 * A Bus Reset could occur at any time and flush the tx_waitq,
8005 	 * so we cannot count on the tx_waitq to contain even one cmd.
8006 	 * Also, whenever the m_tx_waitq_mutex is dropped to run
8007 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8008 	 */
8009 	cmd = mpt->m_tx_waitq;
8010 	for (;;) {
8011 		if ((cmd = mpt->m_tx_waitq) == NULL) {
8012 			mpt->m_tx_draining = 0;
8013 			break;
8014 		}
8015 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8016 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8017 		}
8018 		cmd->cmd_linkp = NULL;
8019 		mutex_exit(&mpt->m_tx_waitq_mutex);
8020 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8021 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8022 			    "to accept cmd on queue\n");
8023 		mutex_enter(&mpt->m_tx_waitq_mutex);
8024 	}
8025 }
8026 
8027 
8028 /*
8029  * mpt tag type lookup
8030  */
8031 static char mptsas_tag_lookup[] =
8032 	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8033 
8034 static int
8035 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8036 {
8037 	struct scsi_pkt		*pkt = CMD2PKT(cmd);
8038 	uint32_t		control = 0;
8039 	int			n;
8040 	caddr_t			mem;
8041 	pMpi2SCSIIORequest_t	io_request;
8042 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
8043 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
8044 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
8045 	uint16_t		SMID, io_flags = 0;
8046 	uint32_t		request_desc_low, request_desc_high;
8047 
8048 	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
8049 
8050 	/*
8051 	 * The SMID is simply the command's slot number.  Slot/SMID 0 is
8052 	 * never used because 0 is an invalid SMID.
8053 	 */
8054 	SMID = cmd->cmd_slot;
8055 
8056 	/*
8057 	 * It is possible for back-to-back device resets to
8058 	 * happen before the reset delay has expired.  That's
8059 	 * ok, just let the device reset go out on the bus.
8060 	 */
8061 	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8062 		ASSERT(ptgt->m_reset_delay == 0);
8063 	}
8064 
8065 	/*
8066 	 * if a non-tagged cmd is submitted to an active tagged target
8067 	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8068 	 * to be untagged
8069 	 */
8070 	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8071 	    (ptgt->m_t_ncmds > 1) &&
8072 	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8073 	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8074 		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8075 			NDBG23(("target=%d, untagged cmd, start draining\n",
8076 			    ptgt->m_devhdl));
8077 
8078 			if (ptgt->m_reset_delay == 0) {
8079 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8080 			}
8081 
8082 			mptsas_remove_cmd(mpt, cmd);
8083 			cmd->cmd_pkt_flags |= FLAG_HEAD;
8084 			mptsas_waitq_add(mpt, cmd);
8085 		}
8086 		return (DDI_FAILURE);
8087 	}
8088 
8089 	/*
8090 	 * Set correct tag bits.
8091 	 */
8092 	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8093 		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8094 		    FLAG_TAGMASK) >> 12)]) {
8095 		case MSG_SIMPLE_QTAG:
8096 			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8097 			break;
8098 		case MSG_HEAD_QTAG:
8099 			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8100 			break;
8101 		case MSG_ORDERED_QTAG:
8102 			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8103 			break;
8104 		default:
8105 			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8106 			break;
8107 		}
8108 	} else {
8109 		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8110 			ptgt->m_t_throttle = 1;
8111 		}
8112 		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8113 	}
8114 
8115 	if (cmd->cmd_pkt_flags & FLAG_TLR) {
8116 		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8117 	}
8118 
8119 	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8120 	io_request = (pMpi2SCSIIORequest_t)mem;
8121 
8122 	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8123 	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8124 	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8125 	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8126 	    MPI2_FUNCTION_SCSI_IO_REQUEST);
8127 
8128 	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8129 	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8130 
8131 	io_flags = cmd->cmd_cdblen;
8132 	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8133 	/*
8134 	 * setup the Scatter/Gather DMA list for this request
8135 	 */
8136 	if (cmd->cmd_cookiec > 0) {
8137 		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8138 	} else {
8139 		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8140 		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8141 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
8142 		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8143 		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8144 	}
8145 
8146 	/*
8147 	 * save ARQ information
8148 	 */
8149 	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8150 	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8151 	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8152 		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8153 		    cmd->cmd_ext_arqcookie.dmac_address);
8154 	} else {
8155 		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8156 		    cmd->cmd_arqcookie.dmac_address);
8157 	}
8158 
8159 	ddi_put32(acc_hdl, &io_request->Control, control);
8160 
8161 	NDBG31(("starting message=0x%p, with cmd=0x%p",
8162 	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8163 
8164 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8165 
8166 	/*
8167 	 * Build request descriptor and write it to the request desc post reg.
8168 	 */
8169 	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8170 	request_desc_high = ptgt->m_devhdl << 16;
8171 	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8172 
8173 	/*
8174 	 * Start timeout.
8175 	 */
8176 #ifdef MPTSAS_TEST
8177 	/*
8178 	 * Temporarily set timebase = 0;  needed for
8179 	 * timeout torture test.
8180 	 */
8181 	if (mptsas_test_timeouts) {
8182 		ptgt->m_timebase = 0;
8183 	}
8184 #endif
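	/*
	 * Track how many outstanding commands share the current timebase
	 * (m_dups) so mptsas_remove_cmd() can recompute the per-target
	 * timeout cheaply.
	 */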
8185 	n = pkt->pkt_time - ptgt->m_timebase;
8186 
8187 	if (n == 0) {
8188 		(ptgt->m_dups)++;
8189 		ptgt->m_timeout = ptgt->m_timebase;
8190 	} else if (n > 0) {
8191 		ptgt->m_timeout =
8192 		    ptgt->m_timebase = pkt->pkt_time;
8193 		ptgt->m_dups = 1;
8194 	} else if (n < 0) {
8195 		ptgt->m_timeout = ptgt->m_timebase;
8196 	}
8197 #ifdef MPTSAS_TEST
8198 	/*
8199 	 * Set back to a number higher than
8200 	 * mptsas_scsi_watchdog_tick
8201 	 * so timeouts will happen in mptsas_watchsubr
8202 	 */
8203 	if (mptsas_test_timeouts) {
8204 		ptgt->m_timebase = 60;
8205 	}
8206 #endif
8207 
8208 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8209 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8210 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8211 		return (DDI_FAILURE);
8212 	}
8213 	return (DDI_SUCCESS);
8214 }
8215 
8216 /*
8217  * Select a helper thread to handle current doneq
8218  */
8219 static void
8220 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8221 {
8222 	uint64_t			t, i;
8223 	uint32_t			min = 0xffffffff;
8224 	mptsas_doneq_thread_list_t	*item;
8225 
8226 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8227 		item = &mpt->m_doneq_thread_id[i];
8228 		/*
8229 		 * If the number of commands pending on helper thread[i]'s
8230 		 * doneq is less than doneq_thread_threshold, pick thread[i].
8231 		 * Otherwise pick the thread with the fewest pending commands.
8232 		 */
8233 
8234 		mutex_enter(&item->mutex);
8235 		if (item->len < mpt->m_doneq_thread_threshold) {
8236 			t = i;
8237 			mutex_exit(&item->mutex);
8238 			break;
8239 		}
8240 		if (item->len < min) {
8241 			min = item->len;
8242 			t = i;
8243 		}
8244 		mutex_exit(&item->mutex);
8245 	}
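	/*
	 * Hand the global doneq over to the chosen thread and wake it up.
	 */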
8246 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8247 	mptsas_doneq_mv(mpt, t);
8248 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
8249 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8250 }
8251 
8252 /*
8253  * Move the current global doneq to the doneq of thread[t].
8254  */
8255 static void
8256 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8257 {
8258 	mptsas_cmd_t			*cmd;
8259 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8260 
8261 	ASSERT(mutex_owned(&item->mutex));
8262 	while ((cmd = mpt->m_doneq) != NULL) {
8263 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8264 			mpt->m_donetail = &mpt->m_doneq;
8265 		}
8266 		cmd->cmd_linkp = NULL;
8267 		*item->donetail = cmd;
8268 		item->donetail = &cmd->cmd_linkp;
8269 		mpt->m_doneq_len--;
8270 		item->len++;
8271 	}
8272 }
8273 
8274 void
8275 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8276 {
8277 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
8278 
8279 	/* Check all acc and dma handles */
8280 	if ((mptsas_check_acc_handle(mpt->m_datap) !=
8281 	    DDI_SUCCESS) ||
8282 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8283 	    DDI_SUCCESS) ||
8284 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8285 	    DDI_SUCCESS) ||
8286 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8287 	    DDI_SUCCESS) ||
8288 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8289 	    DDI_SUCCESS) ||
8290 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8291 	    DDI_SUCCESS) ||
8292 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
8293 	    DDI_SUCCESS)) {
8294 		ddi_fm_service_impact(mpt->m_dip,
8295 		    DDI_SERVICE_UNAFFECTED);
8296 		ddi_fm_acc_err_clear(mpt->m_config_handle,
8297 		    DDI_FME_VER0);
8298 		pkt->pkt_reason = CMD_TRAN_ERR;
8299 		pkt->pkt_statistics = 0;
8300 	}
8301 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8302 	    DDI_SUCCESS) ||
8303 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8304 	    DDI_SUCCESS) ||
8305 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8306 	    DDI_SUCCESS) ||
8307 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8308 	    DDI_SUCCESS) ||
8309 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8310 	    DDI_SUCCESS)) {
8311 		ddi_fm_service_impact(mpt->m_dip,
8312 		    DDI_SERVICE_UNAFFECTED);
8313 		pkt->pkt_reason = CMD_TRAN_ERR;
8314 		pkt->pkt_statistics = 0;
8315 	}
8316 	if (cmd->cmd_dmahandle &&
8317 	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8318 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8319 		pkt->pkt_reason = CMD_TRAN_ERR;
8320 		pkt->pkt_statistics = 0;
8321 	}
8322 	if ((cmd->cmd_extra_frames &&
8323 	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8324 	    DDI_SUCCESS) ||
8325 	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8326 	    DDI_SUCCESS)))) {
8327 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8328 		pkt->pkt_reason = CMD_TRAN_ERR;
8329 		pkt->pkt_statistics = 0;
8330 	}
8331 	if (cmd->cmd_arqhandle &&
8332 	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8333 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8334 		pkt->pkt_reason = CMD_TRAN_ERR;
8335 		pkt->pkt_statistics = 0;
8336 	}
8337 	if (cmd->cmd_ext_arqhandle &&
8338 	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8339 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8340 		pkt->pkt_reason = CMD_TRAN_ERR;
8341 		pkt->pkt_statistics = 0;
8342 	}
8343 }
8344 
8345 /*
8346  * These routines manipulate the queue of commands that
8347  * are waiting for their completion routines to be called.
8348  * The queue is usually in FIFO order but on an MP system
8349  * it's possible for the completion routines to get out
8350  * of order. If that's a problem you need to add a global
8351  * mutex around the code that calls the completion routine
8352  * in the interrupt handler.
8353  */
8354 static void
8355 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8356 {
8357 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
8358 
8359 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8360 
8361 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8362 	cmd->cmd_linkp = NULL;
8363 	cmd->cmd_flags |= CFLAG_FINISHED;
8364 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8365 
8366 	mptsas_fma_check(mpt, cmd);
8367 
8368 	/*
8369 	 * Only add scsi pkts that have completion routines to
8370 	 * the doneq.  FLAG_NOINTR cmds do not have callbacks.
8371 	 */
8372 	if (pkt && (pkt->pkt_comp)) {
8373 		*mpt->m_donetail = cmd;
8374 		mpt->m_donetail = &cmd->cmd_linkp;
8375 		mpt->m_doneq_len++;
8376 	}
8377 }
8378 
8379 static mptsas_cmd_t *
8380 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8381 {
8382 	mptsas_cmd_t			*cmd;
8383 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8384 
8385 	/* pop one off the done queue */
8386 	if ((cmd = item->doneq) != NULL) {
8387 		/* if the queue is now empty fix the tail pointer */
8388 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8389 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
8390 			item->donetail = &item->doneq;
8391 		}
8392 		cmd->cmd_linkp = NULL;
8393 		item->len--;
8394 	}
8395 	return (cmd);
8396 }
8397 
8398 static void
8399 mptsas_doneq_empty(mptsas_t *mpt)
8400 {
8401 	if (mpt->m_doneq && !mpt->m_in_callback) {
8402 		mptsas_cmd_t	*cmd, *next;
8403 		struct scsi_pkt *pkt;
8404 
8405 		mpt->m_in_callback = 1;
8406 		cmd = mpt->m_doneq;
8407 		mpt->m_doneq = NULL;
8408 		mpt->m_donetail = &mpt->m_doneq;
8409 		mpt->m_doneq_len = 0;
8410 
8411 		mutex_exit(&mpt->m_mutex);
8412 		/*
8413 		 * run the completion routines of all the
8414 		 * completed commands
8415 		 */
8416 		while (cmd != NULL) {
8417 			next = cmd->cmd_linkp;
8418 			cmd->cmd_linkp = NULL;
8419 			/* run this command's completion routine */
8420 			cmd->cmd_flags |= CFLAG_COMPLETED;
8421 			pkt = CMD2PKT(cmd);
8422 			mptsas_pkt_comp(pkt, cmd);
8423 			cmd = next;
8424 		}
8425 		mutex_enter(&mpt->m_mutex);
8426 		mpt->m_in_callback = 0;
8427 	}
8428 }
8429 
8430 /*
8431  * These routines manipulate the target's queue of pending requests
8432  */
8433 void
8434 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8435 {
8436 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8437 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8438 	cmd->cmd_queued = TRUE;
8439 	if (ptgt)
8440 		ptgt->m_t_nwait++;
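	/*
	 * FLAG_HEAD cmds go to the front of the waitq; everything else
	 * is appended at the tail.
	 */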
8441 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8442 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8443 			mpt->m_waitqtail = &cmd->cmd_linkp;
8444 		}
8445 		mpt->m_waitq = cmd;
8446 	} else {
8447 		cmd->cmd_linkp = NULL;
8448 		*(mpt->m_waitqtail) = cmd;
8449 		mpt->m_waitqtail = &cmd->cmd_linkp;
8450 	}
8451 }
8452 
8453 static mptsas_cmd_t *
8454 mptsas_waitq_rm(mptsas_t *mpt)
8455 {
8456 	mptsas_cmd_t	*cmd;
8457 	mptsas_target_t *ptgt;
8458 	NDBG7(("mptsas_waitq_rm"));
8459 
8460 	MPTSAS_WAITQ_RM(mpt, cmd);
8461 
8462 	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8463 	if (cmd) {
8464 		ptgt = cmd->cmd_tgt_addr;
8465 		if (ptgt) {
8466 			ptgt->m_t_nwait--;
8467 			ASSERT(ptgt->m_t_nwait >= 0);
8468 		}
8469 	}
8470 	return (cmd);
8471 }
8472 
8473 /*
8474  * remove specified cmd from the middle of the wait queue.
8475  */
8476 static void
8477 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8478 {
8479 	mptsas_cmd_t	*prevp = mpt->m_waitq;
8480 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8481 
8482 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8483 	    (void *)mpt, (void *)cmd));
8484 	if (ptgt) {
8485 		ptgt->m_t_nwait--;
8486 		ASSERT(ptgt->m_t_nwait >= 0);
8487 	}
8488 
8489 	if (prevp == cmd) {
8490 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8491 			mpt->m_waitqtail = &mpt->m_waitq;
8492 
8493 		cmd->cmd_linkp = NULL;
8494 		cmd->cmd_queued = FALSE;
8495 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8496 		    (void *)mpt, (void *)cmd));
8497 		return;
8498 	}
8499 
8500 	while (prevp != NULL) {
8501 		if (prevp->cmd_linkp == cmd) {
8502 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8503 				mpt->m_waitqtail = &prevp->cmd_linkp;
8504 
8505 			cmd->cmd_linkp = NULL;
8506 			cmd->cmd_queued = FALSE;
8507 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8508 			    (void *)mpt, (void *)cmd));
8509 			return;
8510 		}
8511 		prevp = prevp->cmd_linkp;
8512 	}
8513 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8514 }
8515 
8516 static mptsas_cmd_t *
8517 mptsas_tx_waitq_rm(mptsas_t *mpt)
8518 {
8519 	mptsas_cmd_t *cmd;
8520 	NDBG7(("mptsas_tx_waitq_rm"));
8521 
8522 	MPTSAS_TX_WAITQ_RM(mpt, cmd);
8523 
8524 	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8525 
8526 	return (cmd);
8527 }
8528 
8529 /*
8530  * remove specified cmd from the middle of the tx_waitq.
8531  */
8532 static void
8533 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8534 {
8535 	mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8536 
8537 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8538 	    (void *)mpt, (void *)cmd));
8539 
8540 	if (prevp == cmd) {
8541 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8542 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8543 
8544 		cmd->cmd_linkp = NULL;
8545 		cmd->cmd_queued = FALSE;
8546 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8547 		    (void *)mpt, (void *)cmd));
8548 		return;
8549 	}
8550 
8551 	while (prevp != NULL) {
8552 		if (prevp->cmd_linkp == cmd) {
8553 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8554 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8555 
8556 			cmd->cmd_linkp = NULL;
8557 			cmd->cmd_queued = FALSE;
8558 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8559 			    (void *)mpt, (void *)cmd));
8560 			return;
8561 		}
8562 		prevp = prevp->cmd_linkp;
8563 	}
8564 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8565 }
8566 
8567 /*
8568  * device and bus reset handling
8569  *
8570  * Notes:
8571  *	- RESET_ALL:	reset the controller
8572  *	- RESET_TARGET:	reset the target specified in scsi_address
8573  */
8574 static int
8575 mptsas_scsi_reset(struct scsi_address *ap, int level)
8576 {
8577 	mptsas_t		*mpt = ADDR2MPT(ap);
8578 	int			rval;
8579 	mptsas_tgt_private_t	*tgt_private;
8580 	mptsas_target_t		*ptgt = NULL;
8581 
8582 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8583 	ptgt = tgt_private->t_private;
8584 	if (ptgt == NULL) {
8585 		return (FALSE);
8586 	}
8587 	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8588 	    level));
8589 
8590 	mutex_enter(&mpt->m_mutex);
8591 	/*
8592 	 * if we are not in panic set up a reset delay for this target
8593 	 */
8594 	if (!ddi_in_panic()) {
8595 		mptsas_setup_bus_reset_delay(mpt);
8596 	} else {
8597 		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8598 	}
8599 	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8600 	mutex_exit(&mpt->m_mutex);
8601 
8602 	/*
8603 	 * The transport layer expect to only see TRUE and
8604 	 * FALSE. Therefore, we will adjust the return value
8605 	 * if mptsas_do_scsi_reset returns FAILED.
8606 	 */
8607 	if (rval == FAILED)
8608 		rval = FALSE;
8609 	return (rval);
8610 }
8611 
8612 static int
8613 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8614 {
8615 	int		rval = FALSE;
8616 	uint8_t		config, disk;
8617 	mptsas_slots_t	*slots = mpt->m_active;
8618 
8619 	ASSERT(mutex_owned(&mpt->m_mutex));
8620 
8621 	if (mptsas_debug_resets) {
8622 		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8623 		    devhdl);
8624 	}
8625 
8626 	/*
8627 	 * Issue a Target Reset message to the target specified but not to a
8628 	 * disk making up a raid volume.  Just look through the RAID config
8629 	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
8630 	 * list, then don't reset this target.
8631 	 */
8632 	for (config = 0; config < slots->m_num_raid_configs; config++) {
8633 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8634 			if (devhdl == slots->m_raidconfig[config].
8635 			    m_physdisk_devhdl[disk]) {
8636 				return (TRUE);
8637 			}
8638 		}
8639 	}
8640 
8641 	rval = mptsas_ioc_task_management(mpt,
8642 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8643 
8644 	mptsas_doneq_empty(mpt);
8645 	return (rval);
8646 }
8647 
8648 static int
8649 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8650 	void (*callback)(caddr_t), caddr_t arg)
8651 {
8652 	mptsas_t	*mpt = ADDR2MPT(ap);
8653 
8654 	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8655 
8656 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8657 	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
8658 }
8659 
8660 static int
8661 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8662 {
8663 	dev_info_t	*lun_dip = NULL;
8664 
8665 	ASSERT(sd != NULL);
8666 	ASSERT(name != NULL);
8667 	lun_dip = sd->sd_dev;
8668 	ASSERT(lun_dip != NULL);
8669 
8670 	if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8671 		return (1);
8672 	} else {
8673 		return (0);
8674 	}
8675 }
8676 
8677 static int
8678 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
8679 {
8680 	return (mptsas_get_name(sd, name, len));
8681 }
8682 
8683 void
8684 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8685 {
8686 
8687 	NDBG25(("mptsas_set_throttle: throttle=%x", what));
8688 
8689 	/*
8690 	 * if the bus is draining/quiesced, no changes to the throttles
8691 	 * are allowed.  Disallowing throttle changes during draining
8692 	 * limits error recovery but reduces the draining time.
8693 	 *
8694 	 * all throttles should already have been set to HOLD_THROTTLE
8695 	 */
8696 	if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8697 		return;
8698 	}
8699 
8700 	if (what == HOLD_THROTTLE) {
8701 		ptgt->m_t_throttle = HOLD_THROTTLE;
8702 	} else if (ptgt->m_reset_delay == 0) {
8703 		ptgt->m_t_throttle = what;
8704 	}
8705 }
8706 
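/*
 * Informal summary of the throttle values used here (the actual
 * definitions live in the mptsas headers): HOLD_THROTTLE stops new
 * commands from being started on a target, DRAIN_THROTTLE throttles a
 * target back so that its outstanding commands can drain, and
 * MAX_THROTTLE restores the full per-target queue depth.  Note that a
 * throttle other than HOLD_THROTTLE is only accepted once the target's
 * reset delay has expired.
 */
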
8707 /*
8708  * Clean up from a device reset.
8709  * For a target reset, this function clears the waitq of all commands
8710  * for a particular target.  For an abort task set, it clears the waitq
8711  * of all commands for a particular target/lun.
8712  */
8713 static void
8714 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8715 {
8716 	mptsas_slots_t	*slots = mpt->m_active;
8717 	mptsas_cmd_t	*cmd, *next_cmd;
8718 	int		slot;
8719 	uchar_t		reason;
8720 	uint_t		stat;
8721 
8722 	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8723 
8724 	/*
8725 	 * Make sure the I/O Controller has flushed all cmds
8726 	 * that are associated with this target (for a target reset)
8727 	 * or this target/lun (for an abort task set).
8728 	 * Account for TM requests, which use the last SMID.
8729 	 */
8730 	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8731 		if ((cmd = slots->m_slot[slot]) == NULL)
8732 			continue;
8733 		reason = CMD_RESET;
8734 		stat = STAT_DEV_RESET;
8735 		switch (tasktype) {
8736 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8737 			if (Tgt(cmd) == target) {
8738 				NDBG25(("mptsas_flush_target discovered non-"
8739 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
8740 				    tasktype));
8741 				mptsas_dump_cmd(mpt, cmd);
8742 				mptsas_remove_cmd(mpt, cmd);
8743 				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8744 				mptsas_doneq_add(mpt, cmd);
8745 			}
8746 			break;
8747 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8748 			reason = CMD_ABORTED;
8749 			stat = STAT_ABORTED;
8750 			/*FALLTHROUGH*/
8751 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8752 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8753 
8754 				NDBG25(("mptsas_flush_target discovered non-"
8755 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
8756 				    tasktype));
8757 				mptsas_dump_cmd(mpt, cmd);
8758 				mptsas_remove_cmd(mpt, cmd);
8759 				mptsas_set_pkt_reason(mpt, cmd, reason,
8760 				    stat);
8761 				mptsas_doneq_add(mpt, cmd);
8762 			}
8763 			break;
8764 		default:
8765 			break;
8766 		}
8767 	}
8768 
8769 	/*
8770 	 * Flush the waitq and tx_waitq of this target's cmds
8771 	 */
8772 	cmd = mpt->m_waitq;
8773 
8774 	reason = CMD_RESET;
8775 	stat = STAT_DEV_RESET;
8776 
8777 	switch (tasktype) {
8778 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8779 		while (cmd != NULL) {
8780 			next_cmd = cmd->cmd_linkp;
8781 			if (Tgt(cmd) == target) {
8782 				mptsas_waitq_delete(mpt, cmd);
8783 				mptsas_set_pkt_reason(mpt, cmd,
8784 				    reason, stat);
8785 				mptsas_doneq_add(mpt, cmd);
8786 			}
8787 			cmd = next_cmd;
8788 		}
8789 		mutex_enter(&mpt->m_tx_waitq_mutex);
8790 		cmd = mpt->m_tx_waitq;
8791 		while (cmd != NULL) {
8792 			next_cmd = cmd->cmd_linkp;
8793 			if (Tgt(cmd) == target) {
8794 				mptsas_tx_waitq_delete(mpt, cmd);
8795 				mutex_exit(&mpt->m_tx_waitq_mutex);
8796 				mptsas_set_pkt_reason(mpt, cmd,
8797 				    reason, stat);
8798 				mptsas_doneq_add(mpt, cmd);
8799 				mutex_enter(&mpt->m_tx_waitq_mutex);
8800 			}
8801 			cmd = next_cmd;
8802 		}
8803 		mutex_exit(&mpt->m_tx_waitq_mutex);
8804 		break;
8805 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8806 		reason = CMD_ABORTED;
8807 		stat =  STAT_ABORTED;
8808 		/*FALLTHROUGH*/
8809 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8810 		while (cmd != NULL) {
8811 			next_cmd = cmd->cmd_linkp;
8812 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8813 				mptsas_waitq_delete(mpt, cmd);
8814 				mptsas_set_pkt_reason(mpt, cmd,
8815 				    reason, stat);
8816 				mptsas_doneq_add(mpt, cmd);
8817 			}
8818 			cmd = next_cmd;
8819 		}
8820 		mutex_enter(&mpt->m_tx_waitq_mutex);
8821 		cmd = mpt->m_tx_waitq;
8822 		while (cmd != NULL) {
8823 			next_cmd = cmd->cmd_linkp;
8824 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8825 				mptsas_tx_waitq_delete(mpt, cmd);
8826 				mutex_exit(&mpt->m_tx_waitq_mutex);
8827 				mptsas_set_pkt_reason(mpt, cmd,
8828 				    reason, stat);
8829 				mptsas_doneq_add(mpt, cmd);
8830 				mutex_enter(&mpt->m_tx_waitq_mutex);
8831 			}
8832 			cmd = next_cmd;
8833 		}
8834 		mutex_exit(&mpt->m_tx_waitq_mutex);
8835 		break;
8836 	default:
8837 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8838 		    tasktype);
8839 		break;
8840 	}
8841 }
8842 
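/*
 * Note that mptsas_flush_target() does not complete the flushed commands
 * itself; it only moves them onto the doneq with the appropriate
 * pkt_reason/pkt_statistics set, and the caller is expected to drain the
 * doneq (e.g. via mptsas_doneq_empty()) to run the completion callbacks.
 */
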
8843 /*
8844  * Clean up HBA state: abort all outstanding commands and all commands on
8845  * the waitq, and reset the timeouts of all targets.
8846  */
8847 static void
8848 mptsas_flush_hba(mptsas_t *mpt)
8849 {
8850 	mptsas_slots_t	*slots = mpt->m_active;
8851 	mptsas_cmd_t	*cmd;
8852 	int		slot;
8853 
8854 	NDBG25(("mptsas_flush_hba"));
8855 
8856 	/*
8857 	 * The I/O Controller should have already sent back
8858 	 * all commands via the scsi I/O reply frame.  Make
8859 	 * sure all commands have been flushed.
8860 	 * Account for TM requests, which use the last SMID.
8861 	 */
8862 	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8863 		if ((cmd = slots->m_slot[slot]) == NULL)
8864 			continue;
8865 
8866 		if (cmd->cmd_flags & CFLAG_CMDIOC) {
8867 			/*
8868 			 * Need to make sure to tell everyone who might be
8869 			 * waiting on this command that it's going to fail.  If
8870 			 * we get here, this command will never time out because
8871 			 * the active command table is going to be re-allocated,
8872 			 * so there will be nothing to check for a timeout.
8873 			 * Instead, mark the command as failed due to reset.
8874 			 */
8875 			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8876 			    STAT_BUS_RESET);
8877 			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8878 			    (cmd->cmd_flags & CFLAG_CONFIG) ||
8879 			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8880 				cmd->cmd_flags |= CFLAG_FINISHED;
8881 				cv_broadcast(&mpt->m_passthru_cv);
8882 				cv_broadcast(&mpt->m_config_cv);
8883 				cv_broadcast(&mpt->m_fw_diag_cv);
8884 			}
8885 			continue;
8886 		}
8887 
8888 		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8889 		    slot));
8890 		mptsas_dump_cmd(mpt, cmd);
8891 
8892 		mptsas_remove_cmd(mpt, cmd);
8893 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8894 		mptsas_doneq_add(mpt, cmd);
8895 	}
8896 
8897 	/*
8898 	 * Flush the waitq.
8899 	 */
8900 	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8901 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8902 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8903 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
8904 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8905 			cmd->cmd_flags |= CFLAG_FINISHED;
8906 			cv_broadcast(&mpt->m_passthru_cv);
8907 			cv_broadcast(&mpt->m_config_cv);
8908 			cv_broadcast(&mpt->m_fw_diag_cv);
8909 		} else {
8910 			mptsas_doneq_add(mpt, cmd);
8911 		}
8912 	}
8913 
8914 	/*
8915 	 * Flush the tx_waitq
8916 	 */
8917 	mutex_enter(&mpt->m_tx_waitq_mutex);
8918 	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8919 		mutex_exit(&mpt->m_tx_waitq_mutex);
8920 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8921 		mptsas_doneq_add(mpt, cmd);
8922 		mutex_enter(&mpt->m_tx_waitq_mutex);
8923 	}
8924 	mutex_exit(&mpt->m_tx_waitq_mutex);
8925 }
8926 
8927 /*
8928  * set pkt_reason and OR in pkt_statistics flag
8929  */
8930 static void
8931 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8932     uint_t stat)
8933 {
8934 #ifndef __lock_lint
8935 	_NOTE(ARGUNUSED(mpt))
8936 #endif
8937 
8938 	NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8939 	    (void *)cmd, reason, stat));
8940 
8941 	if (cmd) {
8942 		if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8943 			cmd->cmd_pkt->pkt_reason = reason;
8944 		}
8945 		cmd->cmd_pkt->pkt_statistics |= stat;
8946 	}
8947 }
8948 
8949 static void
8950 mptsas_start_watch_reset_delay()
8951 {
8952 	NDBG22(("mptsas_start_watch_reset_delay"));
8953 
8954 	mutex_enter(&mptsas_global_mutex);
8955 	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8956 		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8957 		    drv_usectohz((clock_t)
8958 		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8959 		ASSERT(mptsas_reset_watch != NULL);
8960 	}
8961 	mutex_exit(&mptsas_global_mutex);
8962 }
8963 
8964 static void
8965 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8966 {
8967 	mptsas_target_t	*ptgt = NULL;
8968 
8969 	NDBG22(("mptsas_setup_bus_reset_delay"));
8970 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8971 	    MPTSAS_HASH_FIRST);
8972 	while (ptgt != NULL) {
8973 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8974 		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8975 
8976 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8977 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8978 	}
8979 
8980 	mptsas_start_watch_reset_delay();
8981 }
8982 
8983 /*
8984  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8985  * mpt instance for active reset delays
8986  */
8987 static void
8988 mptsas_watch_reset_delay(void *arg)
8989 {
8990 #ifndef __lock_lint
8991 	_NOTE(ARGUNUSED(arg))
8992 #endif
8993 
8994 	mptsas_t	*mpt;
8995 	int		not_done = 0;
8996 
8997 	NDBG22(("mptsas_watch_reset_delay"));
8998 
8999 	mutex_enter(&mptsas_global_mutex);
9000 	mptsas_reset_watch = 0;
9001 	mutex_exit(&mptsas_global_mutex);
9002 	rw_enter(&mptsas_global_rwlock, RW_READER);
9003 	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9004 		if (mpt->m_tran == 0) {
9005 			continue;
9006 		}
9007 		mutex_enter(&mpt->m_mutex);
9008 		not_done += mptsas_watch_reset_delay_subr(mpt);
9009 		mutex_exit(&mpt->m_mutex);
9010 	}
9011 	rw_exit(&mptsas_global_rwlock);
9012 
9013 	if (not_done) {
9014 		mptsas_start_watch_reset_delay();
9015 	}
9016 }
9017 
9018 static int
9019 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9020 {
9021 	int		done = 0;
9022 	int		restart = 0;
9023 	mptsas_target_t	*ptgt = NULL;
9024 
9025 	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9026 
9027 	ASSERT(mutex_owned(&mpt->m_mutex));
9028 
9029 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9030 	    MPTSAS_HASH_FIRST);
9031 	while (ptgt != NULL) {
9032 		if (ptgt->m_reset_delay != 0) {
9033 			ptgt->m_reset_delay -=
9034 			    MPTSAS_WATCH_RESET_DELAY_TICK;
9035 			if (ptgt->m_reset_delay <= 0) {
9036 				ptgt->m_reset_delay = 0;
9037 				mptsas_set_throttle(mpt, ptgt,
9038 				    MAX_THROTTLE);
9039 				restart++;
9040 			} else {
9041 				done = -1;
9042 			}
9043 		}
9044 
9045 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9046 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9047 	}
9048 
9049 	if (restart > 0) {
9050 		mptsas_restart_hba(mpt);
9051 	}
9052 	return (done);
9053 }
9054 
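/*
 * Informal sketch of the reset-delay bookkeeping above: when a reset is
 * issued, every target is put at HOLD_THROTTLE and m_reset_delay is set
 * to m_scsi_reset_delay (milliseconds).  A single global timeout then
 * fires every MPTSAS_WATCH_RESET_DELAY_TICK milliseconds, walks all
 * instances, and decrements each target's m_reset_delay by one tick; once
 * a target's delay reaches zero its throttle is restored to MAX_THROTTLE
 * and the HBA is restarted so queued commands can flow again.  The
 * timeout keeps rearming itself as long as any target still has a
 * non-zero delay.
 */
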
9055 #ifdef MPTSAS_TEST
9056 static void
9057 mptsas_test_reset(mptsas_t *mpt, int target)
9058 {
9059 	mptsas_target_t    *ptgt = NULL;
9060 
9061 	if (mptsas_rtest == target) {
9062 		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9063 			mptsas_rtest = -1;
9064 		}
9065 		if (mptsas_rtest == -1) {
9066 			NDBG22(("mptsas_test_reset success"));
9067 		}
9068 	}
9069 }
9070 #endif
9071 
9072 /*
9073  * abort handling:
9074  *
9075  * Notes:
9076  *	- if pkt is not NULL, abort just that command
9077  *	- if pkt is NULL, abort all outstanding commands for target
9078  */
9079 static int
9080 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9081 {
9082 	mptsas_t		*mpt = ADDR2MPT(ap);
9083 	int			rval;
9084 	mptsas_tgt_private_t	*tgt_private;
9085 	int			target, lun;
9086 
9087 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9088 	    tran_tgt_private;
9089 	ASSERT(tgt_private != NULL);
9090 	target = tgt_private->t_private->m_devhdl;
9091 	lun = tgt_private->t_lun;
9092 
9093 	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9094 
9095 	mutex_enter(&mpt->m_mutex);
9096 	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9097 	mutex_exit(&mpt->m_mutex);
9098 	return (rval);
9099 }
9100 
9101 static int
9102 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
9103 {
9104 	mptsas_cmd_t	*sp = NULL;
9105 	mptsas_slots_t	*slots = mpt->m_active;
9106 	int		rval = FALSE;
9107 
9108 	ASSERT(mutex_owned(&mpt->m_mutex));
9109 
9110 	/*
9111 	 * Abort the command pkt on the target/lun in ap.  If pkt is
9112 	 * NULL, abort all outstanding commands on that target/lun.
9113 	 * If you can abort them, return 1, else return 0.
9114 	 * Each packet that's aborted should be sent back to the target
9115 	 * driver through the callback routine, with pkt_reason set to
9116 	 * CMD_ABORTED.
9117 	 *
9118 	 * Abort the cmd pkt on the HBA hardware; clean it out of the
9119 	 * outstanding command lists, etc.
9120 	 */
9121 	if (pkt != NULL) {
9122 		/* abort the specified packet */
9123 		sp = PKT2CMD(pkt);
9124 
9125 		if (sp->cmd_queued) {
9126 			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9127 			    (void *)sp));
9128 			mptsas_waitq_delete(mpt, sp);
9129 			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9130 			    STAT_ABORTED);
9131 			mptsas_doneq_add(mpt, sp);
9132 			rval = TRUE;
9133 			goto done;
9134 		}
9135 
9136 		/*
9137 		 * Have mpt firmware abort this command
9138 		 */
9139 
9140 		if (slots->m_slot[sp->cmd_slot] != NULL) {
9141 			rval = mptsas_ioc_task_management(mpt,
9142 			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9143 			    lun, NULL, 0, 0);
9144 
9145 			/*
9146 			 * The transport layer expects only TRUE and FALSE.
9147 			 * Therefore, if mptsas_ioc_task_management returns
9148 			 * FAILED we will return FALSE.
9149 			 */
9150 			if (rval == FAILED)
9151 				rval = FALSE;
9152 			goto done;
9153 		}
9154 	}
9155 
9156 	/*
9157 	 * If pkt is NULL then abort task set
9158 	 */
9159 	rval = mptsas_ioc_task_management(mpt,
9160 	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
9161 
9162 	/*
9163 	 * The transport layer expects only TRUE and FALSE.
9164 	 * Therefore, if mptsas_ioc_task_management returns
9165 	 * FAILED we will return FALSE.
9166 	 */
9167 	if (rval == FAILED)
9168 		rval = FALSE;
9169 
9170 #ifdef MPTSAS_TEST
9171 	if (rval && mptsas_test_stop) {
9172 		debug_enter("mptsas_do_scsi_abort");
9173 	}
9174 #endif
9175 
9176 done:
9177 	mptsas_doneq_empty(mpt);
9178 	return (rval);
9179 }
9180 
9181 /*
9182  * capability handling:
9183  * (*tran_getcap).  Get the capability named, and return its value.
9184  */
9185 static int
9186 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9187 {
9188 	mptsas_t	*mpt = ADDR2MPT(ap);
9189 	int		ckey;
9190 	int		rval = FALSE;
9191 
9192 	NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9193 	    ap->a_target, cap, tgtonly));
9194 
9195 	mutex_enter(&mpt->m_mutex);
9196 
9197 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9198 		mutex_exit(&mpt->m_mutex);
9199 		return (UNDEFINED);
9200 	}
9201 
9202 	switch (ckey) {
9203 	case SCSI_CAP_DMA_MAX:
9204 		rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9205 		break;
9206 	case SCSI_CAP_ARQ:
9207 		rval = TRUE;
9208 		break;
9209 	case SCSI_CAP_MSG_OUT:
9210 	case SCSI_CAP_PARITY:
9211 	case SCSI_CAP_UNTAGGED_QING:
9212 		rval = TRUE;
9213 		break;
9214 	case SCSI_CAP_TAGGED_QING:
9215 		rval = TRUE;
9216 		break;
9217 	case SCSI_CAP_RESET_NOTIFICATION:
9218 		rval = TRUE;
9219 		break;
9220 	case SCSI_CAP_LINKED_CMDS:
9221 		rval = FALSE;
9222 		break;
9223 	case SCSI_CAP_QFULL_RETRIES:
9224 		rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9225 		    tran_tgt_private))->t_private->m_qfull_retries;
9226 		break;
9227 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
9228 		rval = drv_hztousec(((mptsas_tgt_private_t *)
9229 		    (ap->a_hba_tran->tran_tgt_private))->
9230 		    t_private->m_qfull_retry_interval) / 1000;
9231 		break;
9232 	case SCSI_CAP_CDB_LEN:
9233 		rval = CDB_GROUP4;
9234 		break;
9235 	case SCSI_CAP_INTERCONNECT_TYPE:
9236 		rval = INTERCONNECT_SAS;
9237 		break;
9238 	case SCSI_CAP_TRAN_LAYER_RETRIES:
9239 		if (mpt->m_ioc_capabilities &
9240 		    MPI2_IOCFACTS_CAPABILITY_TLR)
9241 			rval = TRUE;
9242 		else
9243 			rval = FALSE;
9244 		break;
9245 	default:
9246 		rval = UNDEFINED;
9247 		break;
9248 	}
9249 
9250 	NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9251 
9252 	mutex_exit(&mpt->m_mutex);
9253 	return (rval);
9254 }
9255 
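/*
 * For reference, target drivers reach the tran_getcap/tran_setcap entry
 * points here through scsi_ifgetcap(9F)/scsi_ifsetcap(9F); an
 * illustrative (not driver-specific) use would be:
 *
 *	if (scsi_ifgetcap(&sd->sd_address, "tagged-qing", 1) == 1)
 *		(void) scsi_ifsetcap(&sd->sd_address, "tagged-qing", 1, 1);
 */
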
9256 /*
9257  * (*tran_setcap).  Set the capability named to the value given.
9258  */
9259 static int
9260 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9261 {
9262 	mptsas_t	*mpt = ADDR2MPT(ap);
9263 	int		ckey;
9264 	int		rval = FALSE;
9265 
9266 	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9267 	    ap->a_target, cap, value, tgtonly));
9268 
9269 	if (!tgtonly) {
9270 		return (rval);
9271 	}
9272 
9273 	mutex_enter(&mpt->m_mutex);
9274 
9275 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9276 		mutex_exit(&mpt->m_mutex);
9277 		return (UNDEFINED);
9278 	}
9279 
9280 	switch (ckey) {
9281 	case SCSI_CAP_DMA_MAX:
9282 	case SCSI_CAP_MSG_OUT:
9283 	case SCSI_CAP_PARITY:
9284 	case SCSI_CAP_INITIATOR_ID:
9285 	case SCSI_CAP_LINKED_CMDS:
9286 	case SCSI_CAP_UNTAGGED_QING:
9287 	case SCSI_CAP_RESET_NOTIFICATION:
9288 		/*
9289 		 * None of these are settable via
9290 		 * the capability interface.
9291 		 */
9292 		break;
9293 	case SCSI_CAP_ARQ:
9294 		/*
9295 		 * We cannot turn off ARQ, so return FALSE if asked to
9296 		 */
9297 		if (value) {
9298 			rval = TRUE;
9299 		} else {
9300 			rval = FALSE;
9301 		}
9302 		break;
9303 	case SCSI_CAP_TAGGED_QING:
9304 		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9305 		    (ap->a_hba_tran->tran_tgt_private))->t_private,
9306 		    MAX_THROTTLE);
9307 		rval = TRUE;
9308 		break;
9309 	case SCSI_CAP_QFULL_RETRIES:
9310 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9311 		    t_private->m_qfull_retries = (uchar_t)value;
9312 		rval = TRUE;
9313 		break;
9314 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
9315 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9316 		    t_private->m_qfull_retry_interval =
9317 		    drv_usectohz(value * 1000);
9318 		rval = TRUE;
9319 		break;
9320 	default:
9321 		rval = UNDEFINED;
9322 		break;
9323 	}
9324 	mutex_exit(&mpt->m_mutex);
9325 	return (rval);
9326 }
9327 
9328 /*
9329  * Utility routine for mptsas_ifsetcap/ifgetcap
9330  */
9331 /*ARGSUSED*/
9332 static int
9333 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9334 {
9335 	NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9336 
9337 	if (!cap)
9338 		return (FALSE);
9339 
9340 	*cidxp = scsi_hba_lookup_capstr(cap);
9341 	return (TRUE);
9342 }
9343 
9344 static int
9345 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9346 {
9347 	mptsas_slots_t	*old_active = mpt->m_active;
9348 	mptsas_slots_t	*new_active;
9349 	size_t		size;
9350 	int		rval = -1;
9351 
9352 	if (mpt->m_ncmds) {
9353 		NDBG9(("cannot change size of active slots array"));
9354 		return (rval);
9355 	}
9356 
9357 	size = MPTSAS_SLOTS_SIZE(mpt);
9358 	new_active = kmem_zalloc(size, flag);
9359 	if (new_active == NULL) {
9360 		NDBG1(("new active alloc failed"));
9361 	} else {
9362 		/*
9363 		 * Since SMID 0 is reserved and the TM slot is reserved, the
9364 		 * number of slots that can be used at any one time is
9365 		 * m_max_requests - 2.
9366 		 */
9367 		mpt->m_active = new_active;
9368 		mpt->m_active->m_n_slots = (mpt->m_max_requests - 2);
9369 		mpt->m_active->m_size = size;
9370 		mpt->m_active->m_tags = 1;
9371 		if (old_active) {
9372 			kmem_free(old_active, old_active->m_size);
9373 		}
9374 		rval = 0;
9375 	}
9376 
9377 	return (rval);
9378 }
9379 
9380 /*
9381  * Error logging, printing, and debug print routines.
9382  */
9383 static char *mptsas_label = "mpt_sas";
9384 
9385 /*PRINTFLIKE3*/
9386 void
9387 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9388 {
9389 	dev_info_t	*dev;
9390 	va_list		ap;
9391 
9392 	if (mpt) {
9393 		dev = mpt->m_dip;
9394 	} else {
9395 		dev = 0;
9396 	}
9397 
9398 	mutex_enter(&mptsas_log_mutex);
9399 
9400 	va_start(ap, fmt);
9401 	(void) vsprintf(mptsas_log_buf, fmt, ap);
9402 	va_end(ap);
9403 
9404 	if (level == CE_CONT) {
9405 		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9406 	} else {
9407 		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9408 	}
9409 
9410 	mutex_exit(&mptsas_log_mutex);
9411 }
9412 
9413 #ifdef MPTSAS_DEBUG
9414 /*PRINTFLIKE1*/
9415 void
9416 mptsas_printf(char *fmt, ...)
9417 {
9418 	dev_info_t	*dev = 0;
9419 	va_list		ap;
9420 
9421 	mutex_enter(&mptsas_log_mutex);
9422 
9423 	va_start(ap, fmt);
9424 	(void) vsprintf(mptsas_log_buf, fmt, ap);
9425 	va_end(ap);
9426 
9427 #ifdef PROM_PRINTF
9428 	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
9429 #else
9430 	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
9431 #endif
9432 	mutex_exit(&mptsas_log_mutex);
9433 }
9434 #endif
9435 
9436 /*
9437  * timeout handling
9438  */
9439 static void
9440 mptsas_watch(void *arg)
9441 {
9442 #ifndef __lock_lint
9443 	_NOTE(ARGUNUSED(arg))
9444 #endif
9445 
9446 	mptsas_t	*mpt;
9447 	uint32_t	doorbell;
9448 
9449 	NDBG30(("mptsas_watch"));
9450 
9451 	rw_enter(&mptsas_global_rwlock, RW_READER);
9452 	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9453 
9454 		mutex_enter(&mpt->m_mutex);
9455 
9456 		/* Skip device if not powered on */
9457 		if (mpt->m_options & MPTSAS_OPT_PM) {
9458 			if (mpt->m_power_level == PM_LEVEL_D0) {
9459 				(void) pm_busy_component(mpt->m_dip, 0);
9460 				mpt->m_busy = 1;
9461 			} else {
9462 				mutex_exit(&mpt->m_mutex);
9463 				continue;
9464 			}
9465 		}
9466 
9467 		/*
9468 		 * Check if controller is in a FAULT state. If so, reset it.
9469 		 */
9470 		doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9471 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9472 			doorbell &= MPI2_DOORBELL_DATA_MASK;
9473 			mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9474 			    "code: %04x", doorbell);
9475 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9476 				mptsas_log(mpt, CE_WARN, "Reset failed "
9477 				    "after fault was detected");
9478 			}
9479 		}
9480 
9481 		/*
9482 		 * For now, always call mptsas_watchsubr.
9483 		 */
9484 		mptsas_watchsubr(mpt);
9485 
9486 		if (mpt->m_options & MPTSAS_OPT_PM) {
9487 			mpt->m_busy = 0;
9488 			(void) pm_idle_component(mpt->m_dip, 0);
9489 		}
9490 
9491 		mutex_exit(&mpt->m_mutex);
9492 	}
9493 	rw_exit(&mptsas_global_rwlock);
9494 
9495 	mutex_enter(&mptsas_global_mutex);
9496 	if (mptsas_timeouts_enabled)
9497 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9498 	mutex_exit(&mptsas_global_mutex);
9499 }
9500 
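/*
 * mptsas_watch() is the driver's periodic watchdog: it rearms itself via
 * timeout() every mptsas_tick as long as mptsas_timeouts_enabled is set,
 * and mptsas_watchsubr() below performs the per-command and per-target
 * aging for one instance on each pass.
 */
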
9501 static void
9502 mptsas_watchsubr(mptsas_t *mpt)
9503 {
9504 	int		i;
9505 	mptsas_cmd_t	*cmd;
9506 	mptsas_target_t	*ptgt = NULL;
9507 
9508 	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
9509 
9510 #ifdef MPTSAS_TEST
9511 	if (mptsas_enable_untagged) {
9512 		mptsas_test_untagged++;
9513 	}
9514 #endif
9515 
9516 	/*
9517 	 * Check for commands stuck in active slot
9518 	 * Account for TM requests, which use the last SMID.
9519 	 */
9520 	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
9521 		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
9522 			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
9523 				cmd->cmd_active_timeout -=
9524 				    mptsas_scsi_watchdog_tick;
9525 				if (cmd->cmd_active_timeout <= 0) {
9526 					/*
9527 					 * There seems to be a command stuck
9528 					 * in the active slot.  Drain throttle.
9529 					 */
9530 					mptsas_set_throttle(mpt,
9531 					    cmd->cmd_tgt_addr,
9532 					    DRAIN_THROTTLE);
9533 				}
9534 			}
9535 			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9536 			    (cmd->cmd_flags & CFLAG_CONFIG) ||
9537 			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9538 				cmd->cmd_active_timeout -=
9539 				    mptsas_scsi_watchdog_tick;
9540 				if (cmd->cmd_active_timeout <= 0) {
9541 					/*
9542 					 * passthrough command timeout
9543 					 */
9544 					cmd->cmd_flags |= (CFLAG_FINISHED |
9545 					    CFLAG_TIMEOUT);
9546 					cv_broadcast(&mpt->m_passthru_cv);
9547 					cv_broadcast(&mpt->m_config_cv);
9548 					cv_broadcast(&mpt->m_fw_diag_cv);
9549 				}
9550 			}
9551 		}
9552 	}
9553 
9554 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9555 	    MPTSAS_HASH_FIRST);
9556 	while (ptgt != NULL) {
9557 		/*
9558 		 * If we were draining due to a qfull condition,
9559 		 * go back to full throttle.
9560 		 */
9561 		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
9562 		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
9563 		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
9564 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9565 			mptsas_restart_hba(mpt);
9566 		}
9567 
9568 		if ((ptgt->m_t_ncmds > 0) &&
9569 		    (ptgt->m_timebase)) {
9570 
9571 			if (ptgt->m_timebase <=
9572 			    mptsas_scsi_watchdog_tick) {
9573 				ptgt->m_timebase +=
9574 				    mptsas_scsi_watchdog_tick;
9575 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9576 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9577 				continue;
9578 			}
9579 
9580 			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;
9581 
9582 			if (ptgt->m_timeout < 0) {
9583 				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
9584 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9585 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9586 				continue;
9587 			}
9588 
9589 			if ((ptgt->m_timeout) <=
9590 			    mptsas_scsi_watchdog_tick) {
9591 				NDBG23(("pending timeout"));
9592 				mptsas_set_throttle(mpt, ptgt,
9593 				    DRAIN_THROTTLE);
9594 			}
9595 		}
9596 
9597 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9598 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9599 	}
9600 }
9601 
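/*
 * A brief sketch of the per-target timing above (details depend on how
 * m_timebase/m_timeout are maintained when commands are started): while a
 * target has commands outstanding, m_timeout is decremented by
 * mptsas_scsi_watchdog_tick on every watchdog pass.  When it is about to
 * expire the target is put at DRAIN_THROTTLE to stop new I/O, and once it
 * goes negative mptsas_cmd_timeout() issues a target reset to recover.
 */
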
9602 /*
9603  * timeout recovery
9604  */
9605 static void
9606 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9607 {
9608 
9609 	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9610 	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9611 	    "Target %d", devhdl);
9612 
9613 	/*
9614 	 * Attempt a reset of the target that timed out in order to
9615 	 * recover its outstanding commands.
9616 	 */
9617 	NDBG29(("mptsas_cmd_timeout: device reset"));
9618 	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9619 		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9620 		    "recovery failed!", devhdl);
9621 	}
9622 }
9623 
9624 /*
9625  * Device / Hotplug control
9626  */
9627 static int
9628 mptsas_scsi_quiesce(dev_info_t *dip)
9629 {
9630 	mptsas_t	*mpt;
9631 	scsi_hba_tran_t	*tran;
9632 
9633 	tran = ddi_get_driver_private(dip);
9634 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9635 		return (-1);
9636 
9637 	return (mptsas_quiesce_bus(mpt));
9638 }
9639 
9640 static int
9641 mptsas_scsi_unquiesce(dev_info_t *dip)
9642 {
9643 	mptsas_t		*mpt;
9644 	scsi_hba_tran_t	*tran;
9645 
9646 	tran = ddi_get_driver_private(dip);
9647 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9648 		return (-1);
9649 
9650 	return (mptsas_unquiesce_bus(mpt));
9651 }
9652 
9653 static int
9654 mptsas_quiesce_bus(mptsas_t *mpt)
9655 {
9656 	mptsas_target_t	*ptgt = NULL;
9657 
9658 	NDBG28(("mptsas_quiesce_bus"));
9659 	mutex_enter(&mpt->m_mutex);
9660 
9661 	/* Set all the throttles to zero */
9662 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9663 	    MPTSAS_HASH_FIRST);
9664 	while (ptgt != NULL) {
9665 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9666 
9667 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9668 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9669 	}
9670 
9671 	/* If there are any outstanding commands in the queue */
9672 	if (mpt->m_ncmds) {
9673 		mpt->m_softstate |= MPTSAS_SS_DRAINING;
9674 		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9675 		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
9676 		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
9677 			/*
9678 			 * Quiesce has been interrupted
9679 			 */
9680 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9681 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9682 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9683 			while (ptgt != NULL) {
9684 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9685 
9686 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9687 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9688 			}
9689 			mptsas_restart_hba(mpt);
9690 			if (mpt->m_quiesce_timeid != 0) {
9691 				timeout_id_t tid = mpt->m_quiesce_timeid;
9692 				mpt->m_quiesce_timeid = 0;
9693 				mutex_exit(&mpt->m_mutex);
9694 				(void) untimeout(tid);
9695 				return (-1);
9696 			}
9697 			mutex_exit(&mpt->m_mutex);
9698 			return (-1);
9699 		} else {
9700 			/* Bus has been quiesced */
9701 			ASSERT(mpt->m_quiesce_timeid == 0);
9702 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9703 			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
9704 			mutex_exit(&mpt->m_mutex);
9705 			return (0);
9706 		}
9707 	}
9708 	/* Bus was not busy - QUIESCED */
9709 	mutex_exit(&mpt->m_mutex);
9710 
9711 	return (0);
9712 }
9713 
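/*
 * Quiesce, in short: hold every target's throttle, then wait on m_cv for
 * m_ncmds to drain to zero.  mptsas_ncmds_checkdrain() (armed above)
 * re-holds the throttles and rearms itself until the drain completes, and
 * a signal received while waiting undoes the hold and returns failure.
 */
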
9714 static int
9715 mptsas_unquiesce_bus(mptsas_t *mpt)
9716 {
9717 	mptsas_target_t	*ptgt = NULL;
9718 
9719 	NDBG28(("mptsas_unquiesce_bus"));
9720 	mutex_enter(&mpt->m_mutex);
9721 	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9722 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9723 	    MPTSAS_HASH_FIRST);
9724 	while (ptgt != NULL) {
9725 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9726 
9727 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9728 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9729 	}
9730 	mptsas_restart_hba(mpt);
9731 	mutex_exit(&mpt->m_mutex);
9732 	return (0);
9733 }
9734 
9735 static void
9736 mptsas_ncmds_checkdrain(void *arg)
9737 {
9738 	mptsas_t	*mpt = arg;
9739 	mptsas_target_t	*ptgt = NULL;
9740 
9741 	mutex_enter(&mpt->m_mutex);
9742 	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9743 		mpt->m_quiesce_timeid = 0;
9744 		if (mpt->m_ncmds == 0) {
9745 			/* Command queue has been drained */
9746 			cv_signal(&mpt->m_cv);
9747 		} else {
9748 			/*
9749 			 * The throttle may have been reset because
9750 			 * of a SCSI bus reset
9751 			 */
9752 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9753 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9754 			while (ptgt != NULL) {
9755 				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9756 
9757 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9758 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9759 			}
9760 
9761 			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9762 			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
9763 			    drv_usectohz(1000000)));
9764 		}
9765 	}
9766 	mutex_exit(&mpt->m_mutex);
9767 }
9768 
9769 /*ARGSUSED*/
9770 static void
9771 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9772 {
9773 	int	i;
9774 	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9775 	char	buf[128];
9776 
9777 	buf[0] = '\0';
9778 	NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9779 	    Tgt(cmd), Lun(cmd)));
9780 	(void) sprintf(&buf[0], "\tcdb=[");
9781 	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9782 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9783 	}
9784 	(void) sprintf(&buf[strlen(buf)], " ]");
9785 	NDBG25(("?%s\n", buf));
9786 	NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9787 	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9788 	    cmd->cmd_pkt->pkt_state));
9789 	NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", *(cmd->cmd_pkt->pkt_scbp),
9790 	    cmd->cmd_flags));
9791 }
9792 
9793 static void
9794 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9795 {
9796 	caddr_t			memp;
9797 	pMPI2RequestHeader_t	request_hdrp;
9798 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
9799 	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
9800 	uint32_t		request_size, data_size, dataout_size;
9801 	uint32_t		direction;
9802 	ddi_dma_cookie_t	data_cookie;
9803 	ddi_dma_cookie_t	dataout_cookie;
9804 	uint32_t		request_desc_low, request_desc_high = 0;
9805 	uint32_t		i, sense_bufp;
9806 	uint8_t			desc_type;
9807 	uint8_t			*request, function;
9808 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
9809 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
9810 
9811 	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9812 
9813 	request = pt->request;
9814 	direction = pt->direction;
9815 	request_size = pt->request_size;
9816 	data_size = pt->data_size;
9817 	dataout_size = pt->dataout_size;
9818 	data_cookie = pt->data_cookie;
9819 	dataout_cookie = pt->dataout_cookie;
9820 
9821 	/*
9822 	 * Store the passthrough message in the memory location
9823 	 * corresponding to our slot number.
9824 	 */
9825 	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9826 	request_hdrp = (pMPI2RequestHeader_t)memp;
9827 	bzero(memp, mpt->m_req_frame_size);
9828 
9829 	for (i = 0; i < request_size; i++) {
9830 		bcopy(request + i, memp + i, 1);
9831 	}
9832 
9833 	if (data_size || dataout_size) {
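	/*
	 * Build MPI2 64-bit "simple" SGEs for the data and data-out buffers,
	 * if any.  FlagsLength packs the SGE flags (shifted by
	 * MPI2_SGE_FLAGS_SHIFT) together with the transfer length in bytes,
	 * and the DMA address is split across Address.Low/Address.High.
	 * The data-out SGE is always host-to-IOC; the data SGE direction
	 * follows the caller-supplied direction flag.
	 */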
9834 		pMpi2SGESimple64_t	sgep;
9835 		uint32_t		sge_flags;
9836 
9837 		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9838 		    request_size);
9839 		if (dataout_size) {
9840 
9841 			sge_flags = dataout_size |
9842 			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9843 			    MPI2_SGE_FLAGS_END_OF_BUFFER |
9844 			    MPI2_SGE_FLAGS_HOST_TO_IOC |
9845 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9846 			    MPI2_SGE_FLAGS_SHIFT);
9847 			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9848 			ddi_put32(acc_hdl, &sgep->Address.Low,
9849 			    (uint32_t)(dataout_cookie.dmac_laddress &
9850 			    0xffffffffull));
9851 			ddi_put32(acc_hdl, &sgep->Address.High,
9852 			    (uint32_t)(dataout_cookie.dmac_laddress
9853 			    >> 32));
9854 			sgep++;
9855 		}
9856 		sge_flags = data_size;
9857 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9858 		    MPI2_SGE_FLAGS_LAST_ELEMENT |
9859 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
9860 		    MPI2_SGE_FLAGS_END_OF_LIST |
9861 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9862 		    MPI2_SGE_FLAGS_SHIFT);
9863 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9864 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9865 			    MPI2_SGE_FLAGS_SHIFT);
9866 		} else {
9867 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9868 			    MPI2_SGE_FLAGS_SHIFT);
9869 		}
9870 		ddi_put32(acc_hdl, &sgep->FlagsLength,
9871 		    sge_flags);
9872 		ddi_put32(acc_hdl, &sgep->Address.Low,
9873 		    (uint32_t)(data_cookie.dmac_laddress &
9874 		    0xffffffffull));
9875 		ddi_put32(acc_hdl, &sgep->Address.High,
9876 		    (uint32_t)(data_cookie.dmac_laddress >> 32));
9877 	}
9878 
9879 	function = request_hdrp->Function;
9880 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9881 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9882 		pMpi2SCSIIORequest_t	scsi_io_req;
9883 
9884 		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9885 		/*
9886 		 * Put the SGEs for the data and data_out buffers at the end
9887 		 * of the scsi_io_request message header (64 bytes in total).
9888 		 * The residual space following those SGEs is used for the
9889 		 * sense data.
9890 		 */
9891 		ddi_put8(acc_hdl,
9892 		    &scsi_io_req->SenseBufferLength,
9893 		    (uint8_t)(request_size - 64));
9894 
9895 		sense_bufp = mpt->m_req_frame_dma_addr +
9896 		    (mpt->m_req_frame_size * cmd->cmd_slot);
9897 		sense_bufp += 64;
9898 		ddi_put32(acc_hdl,
9899 		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9900 
9901 		/*
9902 		 * Set SGLOffset0 value
9903 		 */
9904 		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9905 		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9906 
9907 		/*
9908 		 * Setup descriptor info.  RAID passthrough must use the
9909 		 * default request descriptor which is already set, so if this
9910 		 * is a SCSI IO request, change the descriptor to SCSI IO.
9911 		 */
9912 		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9913 			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9914 			request_desc_high = (ddi_get16(acc_hdl,
9915 			    &scsi_io_req->DevHandle) << 16);
9916 		}
9917 	}
9918 
9919 	/*
9920 	 * We must wait until this message has completed before
9921 	 * beginning the next one.
9923 	 */
9924 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9925 	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9926 	cmd->cmd_rfm = NULL;
9927 	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9928 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9929 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9930 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9931 	}
9932 }
9933 
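/*
 * For reference, the request descriptor written by MPTSAS_START_CMD above
 * is composed exactly as the code shows: the low 32 bits carry the
 * descriptor type in the low byte and the SMID (cmd_slot) in the upper
 * 16 bits, and for SCSI IO requests the high 32 bits carry the target's
 * DevHandle in their upper 16 bits.  RAID passthrough requests keep the
 * default descriptor type.
 */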
9934 
9935 
9936 static int
9937 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9938     uint8_t *data, uint32_t request_size, uint32_t reply_size,
9939     uint32_t data_size, uint32_t direction, uint8_t *dataout,
9940     uint32_t dataout_size, short timeout, int mode)
9941 {
9942 	mptsas_pt_request_t		pt;
9943 	mptsas_dma_alloc_state_t	data_dma_state;
9944 	mptsas_dma_alloc_state_t	dataout_dma_state;
9945 	caddr_t				memp;
9946 	mptsas_cmd_t			*cmd = NULL;
9947 	struct scsi_pkt			*pkt;
9948 	uint32_t			reply_len = 0, sense_len = 0;
9949 	pMPI2RequestHeader_t		request_hdrp;
9950 	pMPI2RequestHeader_t		request_msg;
9951 	pMPI2DefaultReply_t		reply_msg;
9952 	Mpi2SCSIIOReply_t		rep_msg;
9953 	int				i, status = 0, pt_flags = 0, rv = 0;
9954 	int				rvalue;
9955 	uint8_t				function;
9956 
9957 	ASSERT(mutex_owned(&mpt->m_mutex));
9958 
9959 	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
9960 	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
9961 	request_msg = kmem_zalloc(request_size, KM_SLEEP);
9962 
9963 	mutex_exit(&mpt->m_mutex);
9964 	/*
9965 	 * copy in the request buffer since it could be used by
9966 	 * another thread while the passthrough request is on the waitq
9967 	 */
9968 	if (ddi_copyin(request, request_msg, request_size, mode)) {
9969 		mutex_enter(&mpt->m_mutex);
9970 		status = EFAULT;
9971 		mptsas_log(mpt, CE_WARN, "failed to copy request data");
9972 		goto out;
9973 	}
9974 	mutex_enter(&mpt->m_mutex);
9975 
9976 	function = request_msg->Function;
9977 	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
9978 		pMpi2SCSITaskManagementRequest_t	task;
9979 		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
9980 		mptsas_setup_bus_reset_delay(mpt);
9981 		rv = mptsas_ioc_task_management(mpt, task->TaskType,
9982 		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
9983 		    mode);
9984 
9985 		if (rv != TRUE) {
9986 			status = EIO;
9987 			mptsas_log(mpt, CE_WARN, "task management failed");
9988 		}
9989 		goto out;
9990 	}
9991 
9992 	if (data_size != 0) {
9993 		data_dma_state.size = data_size;
9994 		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
9995 			status = ENOMEM;
9996 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9997 			    "resource");
9998 			goto out;
9999 		}
10000 		pt_flags |= MPTSAS_DATA_ALLOCATED;
10001 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10002 			mutex_exit(&mpt->m_mutex);
10003 			for (i = 0; i < data_size; i++) {
10004 				if (ddi_copyin(data + i, (uint8_t *)
10005 				    data_dma_state.memp + i, 1, mode)) {
10006 					mutex_enter(&mpt->m_mutex);
10007 					status = EFAULT;
10008 					mptsas_log(mpt, CE_WARN, "failed to "
10009 					    "copy read data");
10010 					goto out;
10011 				}
10012 			}
10013 			mutex_enter(&mpt->m_mutex);
10014 		}
10015 	}
10016 
10017 	if (dataout_size != 0) {
10018 		dataout_dma_state.size = dataout_size;
10019 		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
10020 			status = ENOMEM;
10021 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10022 			    "resource");
10023 			goto out;
10024 		}
10025 		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
10026 		mutex_exit(&mpt->m_mutex);
10027 		for (i = 0; i < dataout_size; i++) {
10028 			if (ddi_copyin(dataout + i, (uint8_t *)
10029 			    dataout_dma_state.memp + i, 1, mode)) {
10030 				mutex_enter(&mpt->m_mutex);
10031 				mptsas_log(mpt, CE_WARN, "failed to copy out"
10032 				    " data");
10033 				status = EFAULT;
10034 				goto out;
10035 			}
10036 		}
10037 		mutex_enter(&mpt->m_mutex);
10038 	}
10039 
10040 	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10041 		status = EAGAIN;
10042 		mptsas_log(mpt, CE_NOTE, "command pool is full: Passthrough");
10043 		goto out;
10044 	}
10045 	pt_flags |= MPTSAS_REQUEST_POOL_CMD;
10046 
10047 	bzero((caddr_t)cmd, sizeof (*cmd));
10048 	bzero((caddr_t)pkt, scsi_pkt_size());
10049 	bzero((caddr_t)&pt, sizeof (pt));
10050 
10051 	cmd->ioc_cmd_slot = (uint32_t)(rvalue);
10052 
10053 	pt.request = (uint8_t *)request_msg;
10054 	pt.direction = direction;
10055 	pt.request_size = request_size;
10056 	pt.data_size = data_size;
10057 	pt.dataout_size = dataout_size;
10058 	pt.data_cookie = data_dma_state.cookie;
10059 	pt.dataout_cookie = dataout_dma_state.cookie;
10060 
10061 	/*
10062 	 * Form a blank cmd/pkt to store the acknowledgement message
10063 	 */
10064 	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
10065 	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
10066 	pkt->pkt_ha_private	= (opaque_t)&pt;
10067 	pkt->pkt_flags		= FLAG_HEAD;
10068 	pkt->pkt_time		= timeout;
10069 	cmd->cmd_pkt		= pkt;
10070 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;
10071 
10072 	/*
10073 	 * Save the command in a slot
10074 	 */
10075 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10076 		/*
10077 		 * Once the passthru command gets a slot, set CFLAG_PREPARED
10078 		 * in cmd_flags.
10079 		 */
10080 		cmd->cmd_flags |= CFLAG_PREPARED;
10081 		mptsas_start_passthru(mpt, cmd);
10082 	} else {
10083 		mptsas_waitq_add(mpt, cmd);
10084 	}
10085 
10086 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10087 		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
10088 	}
10089 
10090 	if (cmd->cmd_flags & CFLAG_PREPARED) {
10091 		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
10092 		    cmd->cmd_slot);
10093 		request_hdrp = (pMPI2RequestHeader_t)memp;
10094 	}
10095 
10096 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10097 		status = ETIMEDOUT;
10098 		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
10099 		pt_flags |= MPTSAS_CMD_TIMEOUT;
10100 		goto out;
10101 	}
10102 
10103 	if (cmd->cmd_rfm) {
10104 		/*
10105 		 * A zero cmd_rfm means the command reply is a CONTEXT
10106 		 * reply, so there is no PCI write to post the free reply
10107 		 * SMFA because no reply message frame is used.
10108 		 * A non-zero cmd_rfm means the reply is an ADDRESS reply
10109 		 * and a reply message frame is used.
10110 		 */
10111 		pt_flags |= MPTSAS_ADDRESS_REPLY;
10112 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10113 		    DDI_DMA_SYNC_FORCPU);
10114 		reply_msg = (pMPI2DefaultReply_t)
10115 		    (mpt->m_reply_frame + (cmd->cmd_rfm -
10116 		    mpt->m_reply_frame_dma_addr));
10117 	}
10118 
10119 	mptsas_fma_check(mpt, cmd);
10120 	if (pkt->pkt_reason == CMD_TRAN_ERR) {
10121 		status = EAGAIN;
10122 		mptsas_log(mpt, CE_WARN, "passthru fma error");
10123 		goto out;
10124 	}
10125 	if (pkt->pkt_reason == CMD_RESET) {
10126 		status = EAGAIN;
10127 		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
10128 		goto out;
10129 	}
10130 
10131 	if (pkt->pkt_reason == CMD_INCOMPLETE) {
10132 		status = EIO;
10133 		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
10134 		goto out;
10135 	}
10136 
10137 	mutex_exit(&mpt->m_mutex);
10138 	if (cmd->cmd_flags & CFLAG_PREPARED) {
10139 		function = request_hdrp->Function;
10140 		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10141 		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10142 			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10143 			sense_len = reply_size - reply_len;
10144 		} else {
10145 			reply_len = reply_size;
10146 			sense_len = 0;
10147 		}
10148 
10149 		for (i = 0; i < reply_len; i++) {
10150 			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
10151 			    mode)) {
10152 				mutex_enter(&mpt->m_mutex);
10153 				status = EFAULT;
10154 				mptsas_log(mpt, CE_WARN, "failed to copy out "
10155 				    "reply data");
10156 				goto out;
10157 			}
10158 		}
10159 		for (i = 0; i < sense_len; i++) {
10160 			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
10161 			    reply + reply_len + i, 1, mode)) {
10162 				mutex_enter(&mpt->m_mutex);
10163 				status = EFAULT;
10164 				mptsas_log(mpt, CE_WARN, "failed to copy out "
10165 				    "sense data");
10166 				goto out;
10167 			}
10168 		}
10169 	}
10170 
10171 	if (data_size) {
10172 		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10173 			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
10174 			    DDI_DMA_SYNC_FORCPU);
10175 			for (i = 0; i < data_size; i++) {
10176 				if (ddi_copyout((uint8_t *)(
10177 				    data_dma_state.memp + i), data + i,  1,
10178 				    mode)) {
10179 					mutex_enter(&mpt->m_mutex);
10180 					status = EFAULT;
10181 					mptsas_log(mpt, CE_WARN, "failed to "
10182 					    "copy out the reply data");
10183 					goto out;
10184 				}
10185 			}
10186 		}
10187 	}
10188 	mutex_enter(&mpt->m_mutex);
10189 out:
10190 	/*
10191 	 * Put the reply frame back on the free queue, increment the free
10192 	 * index, and write the new index to the free index register.  But only
10193 	 * if this reply is an ADDRESS reply.
10194 	 */
10195 	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10196 		ddi_put32(mpt->m_acc_free_queue_hdl,
10197 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10198 		    cmd->cmd_rfm);
10199 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10200 		    DDI_DMA_SYNC_FORDEV);
10201 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10202 			mpt->m_free_index = 0;
10203 		}
10204 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10205 		    mpt->m_free_index);
10206 	}
10207 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10208 		mptsas_remove_cmd(mpt, cmd);
10209 		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10210 	}
10211 	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10212 		mptsas_return_to_pool(mpt, cmd);
10213 	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10214 		if (mptsas_check_dma_handle(data_dma_state.handle) !=
10215 		    DDI_SUCCESS) {
10216 			ddi_fm_service_impact(mpt->m_dip,
10217 			    DDI_SERVICE_UNAFFECTED);
10218 			status = EFAULT;
10219 		}
10220 		mptsas_dma_free(&data_dma_state);
10221 	}
10222 	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10223 		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10224 		    DDI_SUCCESS) {
10225 			ddi_fm_service_impact(mpt->m_dip,
10226 			    DDI_SERVICE_UNAFFECTED);
10227 			status = EFAULT;
10228 		}
10229 		mptsas_dma_free(&dataout_dma_state);
10230 	}
10231 	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10232 		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10233 			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10234 		}
10235 	}
10236 	if (request_msg)
10237 		kmem_free(request_msg, request_size);
10238 
10239 	return (status);
10240 }
10241 
10242 static int
10243 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10244 {
10245 	/*
10246 	 * If timeout is 0, set timeout to default of 60 seconds.
10247 	 */
10248 	if (data->Timeout == 0) {
10249 		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10250 	}
10251 
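	/*
	 * The request is accepted when either no data is being transferred
	 * (DataSize == 0 with direction NONE), or data is being transferred
	 * in a single direction (READ or WRITE), or in both directions with
	 * a non-zero DataOutSize.  A bidirectional request is handled as a
	 * READ with a separate data-out buffer.
	 */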
10252 	if (((data->DataSize == 0) &&
10253 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10254 	    ((data->DataSize != 0) &&
10255 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10256 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10257 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10258 	    (data->DataOutSize != 0))))) {
10259 		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10260 			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10261 		} else {
10262 			data->DataOutSize = 0;
10263 		}
10264 		/*
10265 		 * Send passthru request messages
10266 		 */
10267 		return (mptsas_do_passthru(mpt,
10268 		    (uint8_t *)((uintptr_t)data->PtrRequest),
10269 		    (uint8_t *)((uintptr_t)data->PtrReply),
10270 		    (uint8_t *)((uintptr_t)data->PtrData),
10271 		    data->RequestSize, data->ReplySize,
10272 		    data->DataSize, data->DataDirection,
10273 		    (uint8_t *)((uintptr_t)data->PtrDataOut),
10274 		    data->DataOutSize, data->Timeout, mode));
10275 	} else {
10276 		return (EINVAL);
10277 	}
10278 }
10279 
10280 static uint8_t
10281 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10282 {
10283 	uint8_t	index;
10284 
10285 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10286 		if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10287 			return (index);
10288 		}
10289 	}
10290 
10291 	return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10292 }
10293 
10294 static void
10295 mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
10296 {
10297 	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
10298 	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
10299 	struct scsi_pkt			*pkt = cmd->cmd_pkt;
10300 	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
10301 	uint32_t			request_desc_low, i;
10302 
10303 	ASSERT(mutex_owned(&mpt->m_mutex));
10304 
10305 	/*
10306 	 * Form the diag message depending on the post or release function.
10307 	 */
10308 	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
10309 		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
10310 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
10311 		    cmd->cmd_slot));
10312 		bzero(pDiag_post_msg, mpt->m_req_frame_size);
10313 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
10314 		    diag->function);
10315 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
10316 		    diag->pBuffer->buffer_type);
10317 		ddi_put8(mpt->m_acc_req_frame_hdl,
10318 		    &pDiag_post_msg->ExtendedType,
10319 		    diag->pBuffer->extended_type);
10320 		ddi_put32(mpt->m_acc_req_frame_hdl,
10321 		    &pDiag_post_msg->BufferLength,
10322 		    diag->pBuffer->buffer_data.size);
10323 		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
10324 		    i++) {
10325 			ddi_put32(mpt->m_acc_req_frame_hdl,
10326 			    &pDiag_post_msg->ProductSpecific[i],
10327 			    diag->pBuffer->product_specific[i]);
10328 		}
10329 		ddi_put32(mpt->m_acc_req_frame_hdl,
10330 		    &pDiag_post_msg->BufferAddress.Low,
10331 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10332 		    & 0xffffffffull));
10333 		ddi_put32(mpt->m_acc_req_frame_hdl,
10334 		    &pDiag_post_msg->BufferAddress.High,
10335 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10336 		    >> 32));
10337 	} else {
10338 		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
10339 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
10340 		    cmd->cmd_slot));
10341 		bzero(pDiag_release_msg, mpt->m_req_frame_size);
10342 		ddi_put8(mpt->m_acc_req_frame_hdl,
10343 		    &pDiag_release_msg->Function, diag->function);
10344 		ddi_put8(mpt->m_acc_req_frame_hdl,
10345 		    &pDiag_release_msg->BufferType,
10346 		    diag->pBuffer->buffer_type);
10347 	}
10348 
10349 	/*
10350 	 * Send the message
10351 	 */
10352 	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
10353 	    DDI_DMA_SYNC_FORDEV);
10354 	request_desc_low = (cmd->cmd_slot << 16) +
10355 	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10356 	cmd->cmd_rfm = NULL;
10357 	MPTSAS_START_CMD(mpt, request_desc_low, 0);
10358 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
10359 	    DDI_SUCCESS) ||
10360 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
10361 	    DDI_SUCCESS)) {
10362 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10363 	}
10364 }
10365 
10366 static int
10367 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10368     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10369 {
10370 	mptsas_diag_request_t		diag;
10371 	int				status, slot_num, post_flags = 0;
10372 	mptsas_cmd_t			*cmd = NULL;
10373 	struct scsi_pkt			*pkt;
10374 	pMpi2DiagBufferPostReply_t	reply;
10375 	uint16_t			iocstatus;
10376 	uint32_t			iocloginfo, transfer_length;
10377 
10378 	/*
10379 	 * If buffer is not enabled, just leave.
10380 	 */
10381 	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10382 	if (!pBuffer->enabled) {
10383 		status = DDI_FAILURE;
10384 		goto out;
10385 	}
10386 
10387 	/*
10388 	 * Clear some flags initially.
10389 	 */
10390 	pBuffer->force_release = FALSE;
10391 	pBuffer->valid_data = FALSE;
10392 	pBuffer->owned_by_firmware = FALSE;
10393 
10394 	/*
10395 	 * Get a cmd buffer from the cmd buffer pool
10396 	 */
10397 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10398 		status = DDI_FAILURE;
10399 		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10400 		goto out;
10401 	}
10402 	post_flags |= MPTSAS_REQUEST_POOL_CMD;
10403 
10404 	bzero((caddr_t)cmd, sizeof (*cmd));
10405 	bzero((caddr_t)pkt, scsi_pkt_size());
10406 
10407 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10408 
10409 	diag.pBuffer = pBuffer;
10410 	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10411 
10412 	/*
10413 	 * Form a blank cmd/pkt to store the acknowledgement message
10414 	 */
10415 	pkt->pkt_ha_private	= (opaque_t)&diag;
10416 	pkt->pkt_flags		= FLAG_HEAD;
10417 	pkt->pkt_time		= 60;
10418 	cmd->cmd_pkt		= pkt;
10419 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
10420 
10421 	/*
10422 	 * Save the command in a slot
10423 	 */
10424 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10425 		/*
10426 		 * Once the passthru command gets a slot, set CFLAG_PREPARED
10427 		 * in cmd_flags.
10428 		 */
10429 		cmd->cmd_flags |= CFLAG_PREPARED;
10430 		mptsas_start_diag(mpt, cmd);
10431 	} else {
10432 		mptsas_waitq_add(mpt, cmd);
10433 	}
10434 
10435 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10436 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10437 	}
10438 
10439 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10440 		status = DDI_FAILURE;
10441 		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10442 		goto out;
10443 	}
10444 
10445 	/*
10446 	 * cmd_rfm points to the reply message if a reply was given.  Check the
10447 	 * IOCStatus to make sure everything went OK with the FW diag request
10448 	 * and set buffer flags.
10449 	 */
10450 	if (cmd->cmd_rfm) {
10451 		post_flags |= MPTSAS_ADDRESS_REPLY;
10452 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10453 		    DDI_DMA_SYNC_FORCPU);
10454 		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10455 		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10456 
10457 		/*
10458 		 * Get the reply message data
10459 		 */
10460 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10461 		    &reply->IOCStatus);
10462 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10463 		    &reply->IOCLogInfo);
10464 		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10465 		    &reply->TransferLength);
10466 
10467 		/*
10468 		 * If post failed quit.
10469 		 */
10470 		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10471 			status = DDI_FAILURE;
10472 			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10473 			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10474 			    iocloginfo, transfer_length));
10475 			goto out;
10476 		}
10477 
10478 		/*
10479 		 * Post was successful.
10480 		 */
10481 		pBuffer->valid_data = TRUE;
10482 		pBuffer->owned_by_firmware = TRUE;
10483 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10484 		status = DDI_SUCCESS;
10485 	}
10486 
10487 out:
10488 	/*
10489 	 * Put the reply frame back on the free queue, increment the free
10490 	 * index, and write the new index to the free index register.  But only
10491 	 * if this reply is an ADDRESS reply.
10492 	 */
10493 	if (post_flags & MPTSAS_ADDRESS_REPLY) {
10494 		ddi_put32(mpt->m_acc_free_queue_hdl,
10495 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10496 		    cmd->cmd_rfm);
10497 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10498 		    DDI_DMA_SYNC_FORDEV);
10499 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10500 			mpt->m_free_index = 0;
10501 		}
10502 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10503 		    mpt->m_free_index);
10504 	}
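	/*
	 * If the command made it into a slot, remove it.  Removing a prepared
	 * IOC command also takes care of returning it to the pool, which is
	 * why MPTSAS_REQUEST_POOL_CMD is cleared here.
	 */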
10505 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10506 		mptsas_remove_cmd(mpt, cmd);
10507 		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10508 	}
10509 	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
10510 		mptsas_return_to_pool(mpt, cmd);
10511 	}
10512 
10513 	return (status);
10514 }
10515 
10516 static int
10517 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
10518     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
10519     uint32_t diag_type)
10520 {
10521 	mptsas_diag_request_t	diag;
10522 	int			status, slot_num, rel_flags = 0;
10523 	mptsas_cmd_t		*cmd = NULL;
10524 	struct scsi_pkt		*pkt;
10525 	pMpi2DiagReleaseReply_t	reply;
10526 	uint16_t		iocstatus;
10527 	uint32_t		iocloginfo;
10528 
10529 	/*
10530 	 * If buffer is not enabled, just leave.
10531 	 */
10532 	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
10533 	if (!pBuffer->enabled) {
10534 		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
10535 		    "by the IOC");
10536 		status = DDI_FAILURE;
10537 		goto out;
10538 	}
10539 
10540 	/*
10541 	 * Clear some flags initially.
10542 	 */
10543 	pBuffer->force_release = FALSE;
10544 	pBuffer->valid_data = FALSE;
10545 	pBuffer->owned_by_firmware = FALSE;
10546 
10547 	/*
10548 	 * Get a cmd buffer from the cmd buffer pool
10549 	 */
10550 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10551 		status = DDI_FAILURE;
10552 		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
10553 		    "Diag");
10554 		goto out;
10555 	}
10556 	rel_flags |= MPTSAS_REQUEST_POOL_CMD;
10557 
10558 	bzero((caddr_t)cmd, sizeof (*cmd));
10559 	bzero((caddr_t)pkt, scsi_pkt_size());
10560 
10561 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10562 
10563 	diag.pBuffer = pBuffer;
10564 	diag.function = MPI2_FUNCTION_DIAG_RELEASE;
10565 
10566 	/*
10567 	 * Form a blank cmd/pkt to store the acknowledgement message
10568 	 */
10569 	pkt->pkt_ha_private	= (opaque_t)&diag;
10570 	pkt->pkt_flags		= FLAG_HEAD;
10571 	pkt->pkt_time		= 60;
10572 	cmd->cmd_pkt		= pkt;
10573 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
10574 
10575 	/*
10576 	 * Save the command in a slot
10577 	 */
10578 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10579 		/*
10580 		 * Once the passthru command gets a slot, set the
10581 		 * CFLAG_PREPARED flag.
10582 		 */
10583 		cmd->cmd_flags |= CFLAG_PREPARED;
10584 		mptsas_start_diag(mpt, cmd);
10585 	} else {
10586 		mptsas_waitq_add(mpt, cmd);
10587 	}
10588 
10589 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10590 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10591 	}
10592 
10593 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10594 		status = DDI_FAILURE;
10595 		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
10596 		goto out;
10597 	}
10598 
10599 	/*
10600 	 * cmd_rfm points to the reply message if a reply was given.  Check the
10601 	 * IOCStatus to make sure everything went OK with the FW diag request
10602 	 * and set buffer flags.
10603 	 */
10604 	if (cmd->cmd_rfm) {
10605 		rel_flags |= MPTSAS_ADDRESS_REPLY;
10606 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10607 		    DDI_DMA_SYNC_FORCPU);
10608 		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
10609 		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10610 
10611 		/*
10612 		 * Get the reply message data
10613 		 */
10614 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10615 		    &reply->IOCStatus);
10616 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10617 		    &reply->IOCLogInfo);
10618 
10619 		/*
10620 		 * If release failed quit.
10621 		 */
10622 		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
10623 		    pBuffer->owned_by_firmware) {
10624 			status = DDI_FAILURE;
10625 			NDBG13(("release FW Diag Buffer failed: "
10626 			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
10627 			    iocloginfo));
10628 			goto out;
10629 		}
10630 
10631 		/*
10632 		 * Release was successful.
10633 		 */
10634 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10635 		status = DDI_SUCCESS;
10636 
10637 		/*
10638 		 * If this was for an UNREGISTER diag type command, clear the
10639 		 * unique ID.
10640 		 */
10641 		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
10642 			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10643 		}
10644 	}
10645 
10646 out:
10647 	/*
10648 	 * Put the reply frame back on the free queue, increment the free
10649 	 * index, and write the new index to the free index register.  But only
10650 	 * if this reply is an ADDRESS reply.
10651 	 */
10652 	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
10653 		ddi_put32(mpt->m_acc_free_queue_hdl,
10654 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10655 		    cmd->cmd_rfm);
10656 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10657 		    DDI_DMA_SYNC_FORDEV);
10658 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10659 			mpt->m_free_index = 0;
10660 		}
10661 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10662 		    mpt->m_free_index);
10663 	}
10664 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10665 		mptsas_remove_cmd(mpt, cmd);
10666 		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10667 	}
10668 	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
10669 		mptsas_return_to_pool(mpt, cmd);
10670 	}
10671 
10672 	return (status);
10673 }
10674 
10675 static int
10676 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10677     uint32_t *return_code)
10678 {
10679 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10680 	uint8_t				extended_type, buffer_type, i;
10681 	uint32_t			buffer_size;
10682 	uint32_t			unique_id;
10683 	int				status;
10684 
10685 	ASSERT(mutex_owned(&mpt->m_mutex));
10686 
10687 	extended_type = diag_register->ExtendedType;
10688 	buffer_type = diag_register->BufferType;
10689 	buffer_size = diag_register->RequestedBufferSize;
10690 	unique_id = diag_register->UniqueId;
10691 
10692 	/*
10693 	 * Check for valid buffer type
10694 	 */
10695 	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10696 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10697 		return (DDI_FAILURE);
10698 	}
10699 
10700 	/*
10701 	 * Get the current buffer and look up the unique ID.  The unique ID
10702 	 * should not be found.  If it is, the ID is already in use.
10703 	 */
10704 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10705 	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10706 	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10707 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10708 		return (DDI_FAILURE);
10709 	}
10710 
10711 	/*
10712 	 * The buffer's unique ID should not be registered yet, and the given
10713 	 * unique ID cannot be 0.
10714 	 */
10715 	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10716 	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10717 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10718 		return (DDI_FAILURE);
10719 	}
10720 
10721 	/*
10722 	 * If this buffer is already posted as immediate, just change owner.
10723 	 */
10724 	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10725 	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10726 		pBuffer->immediate = FALSE;
10727 		pBuffer->unique_id = unique_id;
10728 		return (DDI_SUCCESS);
10729 	}
10730 
10731 	/*
10732 	 * Post a new buffer after checking if it's enabled.  The DMA buffer
10733 	 * that is allocated will be contiguous (sgl_len = 1).
10734 	 */
10735 	if (!pBuffer->enabled) {
10736 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10737 		return (DDI_FAILURE);
10738 	}
10739 	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10740 	pBuffer->buffer_data.size = buffer_size;
10741 	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10742 		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10743 		    "diag buffer: size = %d bytes", buffer_size);
10744 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10745 		return (DDI_FAILURE);
10746 	}
10747 
10748 	/*
10749 	 * Copy the given info to the diag buffer and post the buffer.
10750 	 */
10751 	pBuffer->buffer_type = buffer_type;
10752 	pBuffer->immediate = FALSE;
10753 	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
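		/*
		 * The product specific data is an array of 32-bit words;
		 * dividing the size by 4 gives the number of entries to copy.
		 */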
10754 		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10755 		    i++) {
10756 			pBuffer->product_specific[i] =
10757 			    diag_register->ProductSpecific[i];
10758 		}
10759 	}
10760 	pBuffer->extended_type = extended_type;
10761 	pBuffer->unique_id = unique_id;
10762 	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10763 
10764 	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10765 	    DDI_SUCCESS) {
10766 		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10767 		    "mptsas_diag_register.");
10768 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10769 		status = DDI_FAILURE;
10770 	}
10771 
10772 	/*
10773 	 * In case there was a failure, free the DMA buffer.
10774 	 */
10775 	if (status == DDI_FAILURE) {
10776 		mptsas_dma_free(&pBuffer->buffer_data);
10777 	}
10778 
10779 	return (status);
10780 }
10781 
10782 static int
10783 mptsas_diag_unregister(mptsas_t *mpt,
10784     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10785 {
10786 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10787 	uint8_t				i;
10788 	uint32_t			unique_id;
10789 	int				status;
10790 
10791 	ASSERT(mutex_owned(&mpt->m_mutex));
10792 
10793 	unique_id = diag_unregister->UniqueId;
10794 
10795 	/*
10796 	 * Get the current buffer and look up the unique ID.  The unique ID
10797 	 * should be there.
10798 	 */
10799 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10800 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10801 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10802 		return (DDI_FAILURE);
10803 	}
10804 
10805 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10806 
10807 	/*
10808 	 * Try to release the buffer from FW before freeing it.  If release
10809 	 * fails, don't free the DMA buffer in case FW tries to access it
10810 	 * later.  If buffer is not owned by firmware, can't release it.
10811 	 */
10812 	if (!pBuffer->owned_by_firmware) {
10813 		status = DDI_SUCCESS;
10814 	} else {
10815 		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10816 		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10817 	}
10818 
10819 	/*
10820 	 * At this point, return the current status no matter what happens with
10821 	 * the DMA buffer.
10822 	 */
10823 	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10824 	if (status == DDI_SUCCESS) {
10825 		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10826 		    DDI_SUCCESS) {
10827 			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10828 			    "in mptsas_diag_unregister.");
10829 			ddi_fm_service_impact(mpt->m_dip,
10830 			    DDI_SERVICE_UNAFFECTED);
10831 		}
10832 		mptsas_dma_free(&pBuffer->buffer_data);
10833 	}
10834 
10835 	return (status);
10836 }
10837 
10838 static int
10839 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10840     uint32_t *return_code)
10841 {
10842 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10843 	uint8_t				i;
10844 	uint32_t			unique_id;
10845 
10846 	ASSERT(mutex_owned(&mpt->m_mutex));
10847 
10848 	unique_id = diag_query->UniqueId;
10849 
10850 	/*
10851 	 * If ID is valid, query on ID.
10852 	 * If ID is invalid, query on buffer type.
10853 	 */
10854 	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10855 		i = diag_query->BufferType;
10856 		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10857 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10858 			return (DDI_FAILURE);
10859 		}
10860 	} else {
10861 		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10862 		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10863 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10864 			return (DDI_FAILURE);
10865 		}
10866 	}
10867 
10868 	/*
10869 	 * Fill query structure with the diag buffer info.
10870 	 */
10871 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10872 	diag_query->BufferType = pBuffer->buffer_type;
10873 	diag_query->ExtendedType = pBuffer->extended_type;
10874 	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10875 		for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10876 		    i++) {
10877 			diag_query->ProductSpecific[i] =
10878 			    pBuffer->product_specific[i];
10879 		}
10880 	}
10881 	diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10882 	diag_query->DriverAddedBufferSize = 0;
10883 	diag_query->UniqueId = pBuffer->unique_id;
10884 	diag_query->ApplicationFlags = 0;
10885 	diag_query->DiagnosticFlags = 0;
10886 
10887 	/*
10888 	 * Set/Clear application flags
10889 	 */
10890 	if (pBuffer->immediate) {
10891 		diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10892 	} else {
10893 		diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10894 	}
10895 	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10896 		diag_query->ApplicationFlags |=
10897 		    MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10898 	} else {
10899 		diag_query->ApplicationFlags &=
10900 		    ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10901 	}
10902 	if (pBuffer->owned_by_firmware) {
10903 		diag_query->ApplicationFlags |=
10904 		    MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10905 	} else {
10906 		diag_query->ApplicationFlags &=
10907 		    ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10908 	}
10909 
10910 	return (DDI_SUCCESS);
10911 }
10912 
10913 static int
10914 mptsas_diag_read_buffer(mptsas_t *mpt,
10915     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10916     uint32_t *return_code, int ioctl_mode)
10917 {
10918 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10919 	uint8_t				i, *pData;
10920 	uint32_t			unique_id, byte;
10921 	int				status;
10922 
10923 	ASSERT(mutex_owned(&mpt->m_mutex));
10924 
10925 	unique_id = diag_read_buffer->UniqueId;
10926 
10927 	/*
10928 	 * Get the current buffer and look up the unique ID.  The unique ID
10929 	 * should be there.
10930 	 */
10931 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10932 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10933 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10934 		return (DDI_FAILURE);
10935 	}
10936 
10937 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10938 
10939 	/*
10940 	 * Make sure requested read is within limits
10941 	 */
10942 	if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10943 	    pBuffer->buffer_data.size) {
10944 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10945 		return (DDI_FAILURE);
10946 	}
10947 
10948 	/*
10949 	 * Copy the requested data from DMA to the diag_read_buffer.  The DMA
10950 	 * buffer that was allocated is one contiguous buffer.
10951 	 */
10952 	pData = (uint8_t *)(pBuffer->buffer_data.memp +
10953 	    diag_read_buffer->StartingOffset);
10954 	(void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10955 	    DDI_DMA_SYNC_FORCPU);
10956 	for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10957 		if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10958 		    != 0) {
10959 			return (DDI_FAILURE);
10960 		}
10961 	}
10962 	diag_read_buffer->Status = 0;
10963 
10964 	/*
10965 	 * Set or clear the Force Release flag.
10966 	 */
10967 	if (pBuffer->force_release) {
10968 		diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10969 	} else {
10970 		diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10971 	}
10972 
10973 	/*
10974 	 * If buffer is to be reregistered, make sure it's not already owned by
10975 	 * firmware first.
10976 	 */
10977 	status = DDI_SUCCESS;
10978 	if (!pBuffer->owned_by_firmware) {
10979 		if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10980 			status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10981 			    return_code);
10982 		}
10983 	}
10984 
10985 	return (status);
10986 }
10987 
10988 static int
10989 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10990     uint32_t *return_code)
10991 {
10992 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10993 	uint8_t				i;
10994 	uint32_t			unique_id;
10995 	int				status;
10996 
10997 	ASSERT(mutex_owned(&mpt->m_mutex));
10998 
10999 	unique_id = diag_release->UniqueId;
11000 
11001 	/*
11002 	 * Get the current buffer and look up the unique ID.  The unique ID
11003 	 * should be there.
11004 	 */
11005 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11006 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11007 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11008 		return (DDI_FAILURE);
11009 	}
11010 
11011 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
11012 
11013 	/*
11014 	 * If buffer is not owned by firmware, it's already been released.
11015 	 */
11016 	if (!pBuffer->owned_by_firmware) {
11017 		*return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11018 		return (DDI_FAILURE);
11019 	}
11020 
11021 	/*
11022 	 * Release the buffer.
11023 	 */
11024 	status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11025 	    MPTSAS_FW_DIAG_TYPE_RELEASE);
11026 	return (status);
11027 }
11028 
11029 static int
11030 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11031     uint32_t length, uint32_t *return_code, int ioctl_mode)
11032 {
11033 	mptsas_fw_diag_register_t	diag_register;
11034 	mptsas_fw_diag_unregister_t	diag_unregister;
11035 	mptsas_fw_diag_query_t		diag_query;
11036 	mptsas_diag_read_buffer_t	diag_read_buffer;
11037 	mptsas_fw_diag_release_t	diag_release;
11038 	int				status = DDI_SUCCESS;
11039 	uint32_t			original_return_code, read_buf_len;
11040 
11041 	ASSERT(mutex_owned(&mpt->m_mutex));
11042 
11043 	original_return_code = *return_code;
11044 	*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11045 
11046 	switch (action) {
11047 		case MPTSAS_FW_DIAG_TYPE_REGISTER:
11048 			if (!length) {
11049 				*return_code =
11050 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11051 				status = DDI_FAILURE;
11052 				break;
11053 			}
11054 			if (ddi_copyin(diag_action, &diag_register,
11055 			    sizeof (diag_register), ioctl_mode) != 0) {
11056 				return (DDI_FAILURE);
11057 			}
11058 			status = mptsas_diag_register(mpt, &diag_register,
11059 			    return_code);
11060 			break;
11061 
11062 		case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11063 			if (length < sizeof (diag_unregister)) {
11064 				*return_code =
11065 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11066 				status = DDI_FAILURE;
11067 				break;
11068 			}
11069 			if (ddi_copyin(diag_action, &diag_unregister,
11070 			    sizeof (diag_unregister), ioctl_mode) != 0) {
11071 				return (DDI_FAILURE);
11072 			}
11073 			status = mptsas_diag_unregister(mpt, &diag_unregister,
11074 			    return_code);
11075 			break;
11076 
11077 		case MPTSAS_FW_DIAG_TYPE_QUERY:
11078 			if (length < sizeof (diag_query)) {
11079 				*return_code =
11080 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11081 				status = DDI_FAILURE;
11082 				break;
11083 			}
11084 			if (ddi_copyin(diag_action, &diag_query,
11085 			    sizeof (diag_query), ioctl_mode) != 0) {
11086 				return (DDI_FAILURE);
11087 			}
11088 			status = mptsas_diag_query(mpt, &diag_query,
11089 			    return_code);
11090 			if (status == DDI_SUCCESS) {
11091 				if (ddi_copyout(&diag_query, diag_action,
11092 				    sizeof (diag_query), ioctl_mode) != 0) {
11093 					return (DDI_FAILURE);
11094 				}
11095 			}
11096 			break;
11097 
11098 		case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
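			/*
			 * Copy in only the fixed part of the request; the 4
			 * bytes subtracted are the DataBuffer placeholder at
			 * the end of the structure.  The full length needed
			 * is computed from BytesToRead below.
			 */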
11099 			if (ddi_copyin(diag_action, &diag_read_buffer,
11100 			    sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11101 				return (DDI_FAILURE);
11102 			}
11103 			read_buf_len = sizeof (diag_read_buffer) -
11104 			    sizeof (diag_read_buffer.DataBuffer) +
11105 			    diag_read_buffer.BytesToRead;
11106 			if (length < read_buf_len) {
11107 				*return_code =
11108 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11109 				status = DDI_FAILURE;
11110 				break;
11111 			}
11112 			status = mptsas_diag_read_buffer(mpt,
11113 			    &diag_read_buffer, diag_action +
11114 			    sizeof (diag_read_buffer) - 4, return_code,
11115 			    ioctl_mode);
11116 			if (status == DDI_SUCCESS) {
11117 				if (ddi_copyout(&diag_read_buffer, diag_action,
11118 				    sizeof (diag_read_buffer) - 4, ioctl_mode)
11119 				    != 0) {
11120 					return (DDI_FAILURE);
11121 				}
11122 			}
11123 			break;
11124 
11125 		case MPTSAS_FW_DIAG_TYPE_RELEASE:
11126 			if (length < sizeof (diag_release)) {
11127 				*return_code =
11128 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11129 				status = DDI_FAILURE;
11130 				break;
11131 			}
11132 			if (ddi_copyin(diag_action, &diag_release,
11133 			    sizeof (diag_release), ioctl_mode) != 0) {
11134 				return (DDI_FAILURE);
11135 			}
11136 			status = mptsas_diag_release(mpt, &diag_release,
11137 			    return_code);
11138 			break;
11139 
11140 		default:
11141 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11142 			status = DDI_FAILURE;
11143 			break;
11144 	}
11145 
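	/*
	 * If the request came in through the new diag interface (indicated by
	 * the caller-supplied return code), a diag-specific error is reported
	 * through *return_code and the ioctl itself is not failed.
	 */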
11146 	if ((status == DDI_FAILURE) &&
11147 	    (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11148 	    (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11149 		status = DDI_SUCCESS;
11150 	}
11151 
11152 	return (status);
11153 }
11154 
11155 static int
11156 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11157 {
11158 	int			status;
11159 	mptsas_diag_action_t	driver_data;
11160 
11161 	ASSERT(mutex_owned(&mpt->m_mutex));
11162 
11163 	/*
11164 	 * Copy the user data to a driver data buffer.
11165 	 */
11166 	if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11167 	    mode) == 0) {
11168 		/*
11169 		 * Send diag action request if Action is valid
11170 		 */
11171 		if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11172 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11173 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11174 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11175 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11176 			status = mptsas_do_diag_action(mpt, driver_data.Action,
11177 			    (void *)(uintptr_t)driver_data.PtrDiagAction,
11178 			    driver_data.Length, &driver_data.ReturnCode,
11179 			    mode);
11180 			if (status == DDI_SUCCESS) {
11181 				if (ddi_copyout(&driver_data.ReturnCode,
11182 				    &user_data->ReturnCode,
11183 				    sizeof (user_data->ReturnCode), mode)
11184 				    != 0) {
11185 					status = EFAULT;
11186 				} else {
11187 					status = 0;
11188 				}
11189 			} else {
11190 				status = EIO;
11191 			}
11192 		} else {
11193 			status = EINVAL;
11194 		}
11195 	} else {
11196 		status = EFAULT;
11197 	}
11198 
11199 	return (status);
11200 }
11201 
11202 /*
11203  * This routine handles the "event query" ioctl.
11204  */
11205 static int
11206 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11207     int *rval)
11208 {
11209 	int			status;
11210 	mptsas_event_query_t	driverdata;
11211 	uint8_t			i;
11212 
11213 	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11214 
11215 	mutex_enter(&mpt->m_mutex);
11216 	for (i = 0; i < 4; i++) {
11217 		driverdata.Types[i] = mpt->m_event_mask[i];
11218 	}
11219 	mutex_exit(&mpt->m_mutex);
11220 
11221 	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11222 		status = EFAULT;
11223 	} else {
11224 		*rval = MPTIOCTL_STATUS_GOOD;
11225 		status = 0;
11226 	}
11227 
11228 	return (status);
11229 }
11230 
11231 /*
11232  * This routine handles the "event enable" ioctl.
11233  */
11234 static int
11235 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11236     int *rval)
11237 {
11238 	int			status;
11239 	mptsas_event_enable_t	driverdata;
11240 	uint8_t			i;
11241 
11242 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11243 		mutex_enter(&mpt->m_mutex);
11244 		for (i = 0; i < 4; i++) {
11245 			mpt->m_event_mask[i] = driverdata.Types[i];
11246 		}
11247 		mutex_exit(&mpt->m_mutex);
11248 
11249 		*rval = MPTIOCTL_STATUS_GOOD;
11250 		status = 0;
11251 	} else {
11252 		status = EFAULT;
11253 	}
11254 	return (status);
11255 }
11256 
11257 /*
11258  * This routine handles the "event report" ioctl.
11259  */
11260 static int
11261 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11262     int *rval)
11263 {
11264 	int			status;
11265 	mptsas_event_report_t	driverdata;
11266 
11267 	mutex_enter(&mpt->m_mutex);
11268 
11269 	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11270 	    mode) == 0) {
11271 		if (driverdata.Size >= sizeof (mpt->m_events)) {
11272 			if (ddi_copyout(mpt->m_events, data->Events,
11273 			    sizeof (mpt->m_events), mode) != 0) {
11274 				status = EFAULT;
11275 			} else {
11276 				if (driverdata.Size > sizeof (mpt->m_events)) {
11277 					driverdata.Size =
11278 					    sizeof (mpt->m_events);
11279 					if (ddi_copyout(&driverdata.Size,
11280 					    &data->Size,
11281 					    sizeof (driverdata.Size),
11282 					    mode) != 0) {
11283 						status = EFAULT;
11284 					} else {
11285 						*rval = MPTIOCTL_STATUS_GOOD;
11286 						status = 0;
11287 					}
11288 				} else {
11289 					*rval = MPTIOCTL_STATUS_GOOD;
11290 					status = 0;
11291 				}
11292 			}
11293 		} else {
11294 			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11295 			status = 0;
11296 		}
11297 	} else {
11298 		status = EFAULT;
11299 	}
11300 
11301 	mutex_exit(&mpt->m_mutex);
11302 	return (status);
11303 }
11304 
11305 static void
11306 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11307 {
11308 	int	*reg_data;
11309 	uint_t	reglen;
11310 
11311 	/*
11312 	 * Lookup the 'reg' property and extract the other data
11313 	 */
11314 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11315 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
11316 	    DDI_PROP_SUCCESS) {
11317 		/*
11318 		 * Extract the PCI data from the 'reg' property first DWORD.
11319 		 * The entry looks like the following:
11320 		 * First DWORD:
11321 		 * Bits 0 - 7 8-bit Register number
11322 		 * Bits 8 - 10 3-bit Function number
11323 		 * Bits 11 - 15 5-bit Device number
11324 		 * Bits 16 - 23 8-bit Bus number
11325 		 * Bits 24 - 25 2-bit Address Space type identifier
11326 		 *
11327 		 */
11328 		adapter_data->PciInformation.u.bits.BusNumber =
11329 		    (reg_data[0] & 0x00FF0000) >> 16;
11330 		adapter_data->PciInformation.u.bits.DeviceNumber =
11331 		    (reg_data[0] & 0x0000F800) >> 11;
11332 		adapter_data->PciInformation.u.bits.FunctionNumber =
11333 		    (reg_data[0] & 0x00000700) >> 8;
11334 		ddi_prop_free((void *)reg_data);
11335 	} else {
11336 		/*
11337 		 * If we can't determine the PCI data then we fill in FF's for
11338 		 * the data to indicate this.
11339 		 */
11340 		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11341 		adapter_data->MpiPortNumber = 0xFFFFFFFF;
11342 		adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11343 	}
11344 
11345 	/*
11346 	 * The firmware version is cached in mpt->m_fwversion.
11347 	 */
11348 	adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11349 }
11350 
11351 static void
11352 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11353 {
11354 	char	*driver_verstr = MPTSAS_MOD_STRING;
11355 
11356 	mptsas_lookup_pci_data(mpt, adapter_data);
11357 	adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11358 	adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11359 	adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11360 	adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11361 	adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11362 	(void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11363 	adapter_data->BiosVersion = 0;
11364 	(void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11365 }
11366 
11367 static void
11368 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11369 {
11370 	int	*reg_data, i;
11371 	uint_t	reglen;
11372 
11373 	/*
11374 	 * Lookup the 'reg' property and extract the other data
11375 	 */
11376 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11377 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
11378 	    DDI_PROP_SUCCESS) {
11379 		/*
11380 		 * Extract the PCI data from the 'reg' property first DWORD.
11381 		 * The entry looks like the following:
11382 		 * First DWORD:
11383 		 * Bits 8 - 10 3-bit Function number
11384 		 * Bits 11 - 15 5-bit Device number
11385 		 * Bits 16 - 23 8-bit Bus number
11386 		 */
11387 		pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11388 		pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11389 		pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11390 		ddi_prop_free((void *)reg_data);
11391 	} else {
11392 		/*
11393 		 * If we can't determine the PCI info then we fill in FF's for
11394 		 * the data to indicate this.
11395 		 */
11396 		pci_info->BusNumber = 0xFFFFFFFF;
11397 		pci_info->DeviceNumber = 0xFF;
11398 		pci_info->FunctionNumber = 0xFF;
11399 	}
11400 
11401 	/*
11402 	 * Now get the interrupt vector and the pci header.  The vector can
11403 	 * only be 0 right now.  The header is the first 256 bytes of config
11404 	 * space.
11405 	 */
11406 	pci_info->InterruptVector = 0;
11407 	for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11408 		pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11409 		    i);
11410 	}
11411 }
11412 
11413 static int
11414 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11415 {
11416 	int			status = 0;
11417 	mptsas_reg_access_t	driverdata;
11418 
11419 	mutex_enter(&mpt->m_mutex);
11420 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11421 		switch (driverdata.Command) {
11422 			/*
11423 			 * IO access is not supported.
11424 			 */
11425 			case REG_IO_READ:
11426 			case REG_IO_WRITE:
11427 				mptsas_log(mpt, CE_WARN, "IO access is not "
11428 				    "supported.  Use memory access.");
11429 				status = EINVAL;
11430 				break;
11431 
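			/*
			 * Note that RegOffset is applied as an index into an
			 * array of 32-bit registers, so it is effectively a
			 * dword offset rather than a byte offset.
			 */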
11432 			case REG_MEM_READ:
11433 				driverdata.RegData = ddi_get32(mpt->m_datap,
11434 				    (uint32_t *)(void *)mpt->m_reg +
11435 				    driverdata.RegOffset);
11436 				if (ddi_copyout(&driverdata.RegData,
11437 				    &data->RegData,
11438 				    sizeof (driverdata.RegData), mode) != 0) {
11439 					mptsas_log(mpt, CE_WARN, "Register "
11440 					    "Read Failed");
11441 					status = EFAULT;
11442 				}
11443 				break;
11444 
11445 			case REG_MEM_WRITE:
11446 				ddi_put32(mpt->m_datap,
11447 				    (uint32_t *)(void *)mpt->m_reg +
11448 				    driverdata.RegOffset,
11449 				    driverdata.RegData);
11450 				break;
11451 
11452 			default:
11453 				status = EINVAL;
11454 				break;
11455 		}
11456 	} else {
11457 		status = EFAULT;
11458 	}
11459 
11460 	mutex_exit(&mpt->m_mutex);
11461 	return (status);
11462 }
11463 
11464 static int
11465 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11466     int *rval)
11467 {
11468 	int			status = 0;
11469 	mptsas_t		*mpt;
11470 	mptsas_update_flash_t	flashdata;
11471 	mptsas_pass_thru_t	passthru_data;
11472 	mptsas_adapter_data_t   adapter_data;
11473 	mptsas_pci_info_t	pci_info;
11474 	int			copylen;
11475 
11476 	int			iport_flag = 0;
11477 	dev_info_t		*dip = NULL;
11478 	mptsas_phymask_t	phymask = 0;
11479 	struct devctl_iocdata	*dcp = NULL;
11480 	uint32_t		slotstatus = 0;
11481 	char			*addr = NULL;
11482 	mptsas_target_t		*ptgt = NULL;
11483 
11484 	*rval = MPTIOCTL_STATUS_GOOD;
11485 	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11486 		return (EPERM);
11487 	}
11488 
11489 	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11490 	if (mpt == NULL) {
11491 		/*
11492 		 * Called from iport node, get the states
11493 		 */
11494 		iport_flag = 1;
11495 		dip = mptsas_get_dip_from_dev(dev, &phymask);
11496 		if (dip == NULL) {
11497 			return (ENXIO);
11498 		}
11499 		mpt = DIP2MPT(dip);
11500 	}
11501 	/* Make sure power level is D0 before accessing registers */
11502 	mutex_enter(&mpt->m_mutex);
11503 	if (mpt->m_options & MPTSAS_OPT_PM) {
11504 		(void) pm_busy_component(mpt->m_dip, 0);
11505 		if (mpt->m_power_level != PM_LEVEL_D0) {
11506 			mutex_exit(&mpt->m_mutex);
11507 			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11508 			    DDI_SUCCESS) {
11509 				mptsas_log(mpt, CE_WARN,
11510 				    "mptsas%d: mptsas_ioctl: Raise power "
11511 				    "request failed.", mpt->m_instance);
11512 				(void) pm_idle_component(mpt->m_dip, 0);
11513 				return (ENXIO);
11514 			}
11515 		} else {
11516 			mutex_exit(&mpt->m_mutex);
11517 		}
11518 	} else {
11519 		mutex_exit(&mpt->m_mutex);
11520 	}
11521 
11522 	if (iport_flag) {
11523 		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11524 		if (status != 0) {
11525 			goto out;
11526 		}
11527 		/*
11528 		 * The following code controls the OK2RM LED; it does not
11529 		 * affect the ioctl return status.
11530 		 */
11531 		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
11532 		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
11533 			if (ndi_dc_allochdl((void *)data, &dcp) !=
11534 			    NDI_SUCCESS) {
11535 				goto out;
11536 			}
11537 			addr = ndi_dc_getaddr(dcp);
11538 			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
11539 			if (ptgt == NULL) {
11540 				NDBG14(("mptsas_ioctl led control: tgt %s not "
11541 				    "found", addr));
11542 				ndi_dc_freehdl(dcp);
11543 				goto out;
11544 			}
11545 			mutex_enter(&mpt->m_mutex);
11546 			if (cmd == DEVCTL_DEVICE_ONLINE) {
11547 				ptgt->m_tgt_unconfigured = 0;
11548 			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
11549 				ptgt->m_tgt_unconfigured = 1;
11550 			}
11551 			slotstatus = 0;
11552 #ifdef MPTSAS_GET_LED
11553 			/*
11554 			 * Reading the LED status does not return a valid or
11555 			 * reasonable state, so ignore it and write the
11556 			 * required value directly.
11557 			 */
11558 			if (mptsas_get_led_status(mpt, ptgt, &slotstatus) !=
11559 			    DDI_SUCCESS) {
11560 				NDBG14(("mptsas_ioctl: get LED for tgt %s "
11561 				    "failed %x", addr, slotstatus));
11562 				slotstatus = 0;
11563 			}
11564 			NDBG14(("mptsas_ioctl: LED status %x for %s",
11565 			    slotstatus, addr));
11566 #endif
11567 			if (cmd == DEVCTL_DEVICE_OFFLINE) {
11568 				slotstatus |=
11569 				    MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11570 			} else {
11571 				slotstatus &=
11572 				    ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11573 			}
11574 			if (mptsas_set_led_status(mpt, ptgt, slotstatus) !=
11575 			    DDI_SUCCESS) {
11576 				NDBG14(("mptsas_ioctl: set LED for tgt %s "
11577 				    "failed %x", addr, slotstatus));
11578 			}
11579 			mutex_exit(&mpt->m_mutex);
11580 			ndi_dc_freehdl(dcp);
11581 		}
11582 		goto out;
11583 	}
11584 	switch (cmd) {
11585 		case MPTIOCTL_UPDATE_FLASH:
11586 			if (ddi_copyin((void *)data, &flashdata,
11587 			    sizeof (struct mptsas_update_flash), mode)) {
11588 				status = EFAULT;
11589 				break;
11590 			}
11591 
11592 			mutex_enter(&mpt->m_mutex);
11593 			if (mptsas_update_flash(mpt,
11594 			    (caddr_t)(long)flashdata.PtrBuffer,
11595 			    flashdata.ImageSize, flashdata.ImageType, mode)) {
11596 				status = EFAULT;
11597 			}
11598 
11599 			/*
11600 			 * Reset the chip to start using the new
11601 			 * firmware.  Reset even if the update failed.
11602 			 */
11603 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11604 				status = EFAULT;
11605 			}
11606 			mutex_exit(&mpt->m_mutex);
11607 			break;
11608 		case MPTIOCTL_PASS_THRU:
11609 			/*
11610 			 * The user has requested to pass through a command to
11611 			 * be executed by the MPT firmware.  Call our routine
11612 			 * which does this.  Only allow one passthru IOCTL at
11613 			 * a time.
11614 			 */
11615 			if (ddi_copyin((void *)data, &passthru_data,
11616 			    sizeof (mptsas_pass_thru_t), mode)) {
11617 				status = EFAULT;
11618 				break;
11619 			}
11620 			mutex_enter(&mpt->m_mutex);
11621 			if (mpt->m_passthru_in_progress) {
11622 				mutex_exit(&mpt->m_mutex);
11623 				return (EBUSY);
11624 			}
11625 			mpt->m_passthru_in_progress = 1;
11626 			status = mptsas_pass_thru(mpt, &passthru_data, mode);
11627 			mpt->m_passthru_in_progress = 0;
11628 			mutex_exit(&mpt->m_mutex);
11629 
11630 			break;
11631 		case MPTIOCTL_GET_ADAPTER_DATA:
11632 			/*
11633 			 * The user has requested to read adapter data.  Call
11634 			 * our routine which does this.
11635 			 */
11636 			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11637 			if (ddi_copyin((void *)data, (void *)&adapter_data,
11638 			    sizeof (mptsas_adapter_data_t), mode)) {
11639 				status = EFAULT;
11640 				break;
11641 			}
11642 			if (adapter_data.StructureLength >=
11643 			    sizeof (mptsas_adapter_data_t)) {
11644 				adapter_data.StructureLength = (uint32_t)
11645 				    sizeof (mptsas_adapter_data_t);
11646 				copylen = sizeof (mptsas_adapter_data_t);
11647 				mutex_enter(&mpt->m_mutex);
11648 				mptsas_read_adapter_data(mpt, &adapter_data);
11649 				mutex_exit(&mpt->m_mutex);
11650 			} else {
11651 				adapter_data.StructureLength = (uint32_t)
11652 				    sizeof (mptsas_adapter_data_t);
11653 				copylen = sizeof (adapter_data.StructureLength);
11654 				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11655 			}
11656 			if (ddi_copyout((void *)(&adapter_data), (void *)data,
11657 			    copylen, mode) != 0) {
11658 				status = EFAULT;
11659 			}
11660 			break;
11661 		case MPTIOCTL_GET_PCI_INFO:
11662 			/*
11663 			 * The user has requested to read pci info.  Call
11664 			 * our routine which does this.
11665 			 */
11666 			bzero(&pci_info, sizeof (mptsas_pci_info_t));
11667 			mutex_enter(&mpt->m_mutex);
11668 			mptsas_read_pci_info(mpt, &pci_info);
11669 			mutex_exit(&mpt->m_mutex);
11670 			if (ddi_copyout((void *)(&pci_info), (void *)data,
11671 			    sizeof (mptsas_pci_info_t), mode) != 0) {
11672 				status = EFAULT;
11673 			}
11674 			break;
11675 		case MPTIOCTL_RESET_ADAPTER:
11676 			mutex_enter(&mpt->m_mutex);
11677 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11678 				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11679 				    "failed");
11680 				status = EFAULT;
11681 			}
11682 			mutex_exit(&mpt->m_mutex);
11683 			break;
11684 		case MPTIOCTL_DIAG_ACTION:
11685 			/*
11686 			 * The user has done a diag buffer action.  Call our
11687 			 * routine which does this.  Only allow one diag action
11688 			 * at a time.
11689 			 */
11690 			mutex_enter(&mpt->m_mutex);
11691 			if (mpt->m_diag_action_in_progress) {
11692 				mutex_exit(&mpt->m_mutex);
11693 				return (EBUSY);
11694 			}
11695 			mpt->m_diag_action_in_progress = 1;
11696 			status = mptsas_diag_action(mpt,
11697 			    (mptsas_diag_action_t *)data, mode);
11698 			mpt->m_diag_action_in_progress = 0;
11699 			mutex_exit(&mpt->m_mutex);
11700 			break;
11701 		case MPTIOCTL_EVENT_QUERY:
11702 			/*
11703 			 * The user has done an event query. Call our routine
11704 			 * which does this.
11705 			 */
11706 			status = mptsas_event_query(mpt,
11707 			    (mptsas_event_query_t *)data, mode, rval);
11708 			break;
11709 		case MPTIOCTL_EVENT_ENABLE:
11710 			/*
11711 			 * The user has done an event enable. Call our routine
11712 			 * which does this.
11713 			 */
11714 			status = mptsas_event_enable(mpt,
11715 			    (mptsas_event_enable_t *)data, mode, rval);
11716 			break;
11717 		case MPTIOCTL_EVENT_REPORT:
11718 			/*
11719 			 * The user has done an event report. Call our routine
11720 			 * which does this.
11721 			 */
11722 			status = mptsas_event_report(mpt,
11723 			    (mptsas_event_report_t *)data, mode, rval);
11724 			break;
11725 		case MPTIOCTL_REG_ACCESS:
11726 			/*
11727 			 * The user has requested register access.  Call our
11728 			 * routine which does this.
11729 			 */
11730 			status = mptsas_reg_access(mpt,
11731 			    (mptsas_reg_access_t *)data, mode);
11732 			break;
11733 		default:
11734 			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11735 			    rval);
11736 			break;
11737 	}
11738 
11739 out:
11740 	/*
11741 	 * Report idle status to pm after a grace period because
11742 	 * multiple ioctls may be queued and raising power
11743 	 * for every ioctl is time consuming.  If a timeout is
11744 	 * pending for the previous ioctl, cancel the timeout and
11745 	 * report idle status to pm because calls to pm_busy_component(9F)
11746 	 * are stacked.
11747 	 */
11748 	mutex_enter(&mpt->m_mutex);
11749 	if (mpt->m_options & MPTSAS_OPT_PM) {
11750 		if (mpt->m_pm_timeid != 0) {
11751 			timeout_id_t tid = mpt->m_pm_timeid;
11752 			mpt->m_pm_timeid = 0;
11753 			mutex_exit(&mpt->m_mutex);
11754 			(void) untimeout(tid);
11755 			/*
11756 			 * Report idle status for previous ioctl since
11757 			 * calls to pm_busy_component(9F) are stacked.
11758 			 */
11759 			(void) pm_idle_component(mpt->m_dip, 0);
11760 			mutex_enter(&mpt->m_mutex);
11761 		}
11762 		mpt->m_pm_timeid = timeout(mptsas_idle_pm, mpt,
11763 		    drv_usectohz((clock_t)mpt->m_pm_idle_delay * 1000000));
11764 	}
11765 	mutex_exit(&mpt->m_mutex);
11766 
11767 	return (status);
11768 }
11769 
11770 int
11771 mptsas_restart_ioc(mptsas_t *mpt)
11772 {
11773 	int		rval = DDI_SUCCESS;
11774 	mptsas_target_t	*ptgt = NULL;
11775 
11776 	ASSERT(mutex_owned(&mpt->m_mutex));
11777 
11778 	/*
11779 	 * Set a flag telling I/O path that we're processing a reset.  This is
11780 	 * needed because after the reset is complete, the hash table still
11781 	 * needs to be rebuilt.  If I/Os are started before the hash table is
11782 	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
11783 	 * so that they can be retried.
11784 	 */
11785 	mpt->m_in_reset = TRUE;
11786 
11787 	/*
11788 	 * Set all throttles to HOLD
11789 	 */
11790 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11791 	    MPTSAS_HASH_FIRST);
11792 	while (ptgt != NULL) {
11793 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
11794 
11795 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11796 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11797 	}
11798 
11799 	/*
11800 	 * Disable interrupts
11801 	 */
11802 	MPTSAS_DISABLE_INTR(mpt);
11803 
11804 	/*
11805 	 * Abort all commands: outstanding commands, commands in waitq and
11806 	 * tx_waitq.
11807 	 */
11808 	mptsas_flush_hba(mpt);
11809 
11810 	/*
11811 	 * Reinitialize the chip.
11812 	 */
11813 	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
11814 		rval = DDI_FAILURE;
11815 	}
11816 
11817 	/*
11818 	 * Enable interrupts again
11819 	 */
11820 	MPTSAS_ENABLE_INTR(mpt);
11821 
11822 	/*
11823 	 * If mptsas_init_chip was successful, update the driver data.
11824 	 */
11825 	if (rval == DDI_SUCCESS) {
11826 		mptsas_update_driver_data(mpt);
11827 	}
11828 
11829 	/*
11830 	 * Reset the throttles
11831 	 */
11832 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11833 	    MPTSAS_HASH_FIRST);
11834 	while (ptgt != NULL) {
11835 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
11836 
11837 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11838 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11839 	}
11840 
11841 	mptsas_doneq_empty(mpt);
11842 	mptsas_restart_hba(mpt);
11843 
11844 	if (rval != DDI_SUCCESS) {
11845 		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
11846 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
11847 	}
11848 
11849 	/*
11850 	 * Clear the reset flag so that I/Os can continue.
11851 	 */
11852 	mpt->m_in_reset = FALSE;
11853 
11854 	return (rval);
11855 }
11856 
11857 int
11858 mptsas_init_chip(mptsas_t *mpt, int first_time)
11859 {
11860 	ddi_dma_cookie_t	cookie;
11861 	uint32_t		i;
11862 	mptsas_slots_t		*new_active;
11863 
11864 	/*
11865 	 * Check to see if the firmware image is valid
11866 	 */
11867 	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
11868 	    MPI2_DIAG_FLASH_BAD_SIG) {
11869 		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
11870 		goto fail;
11871 	}
11872 
11873 	/*
11874 	 * Reset the chip
11875 	 */
11876 	if (mptsas_ioc_reset(mpt) == MPTSAS_RESET_FAIL) {
11877 		mptsas_log(mpt, CE_WARN, "hard reset failed!");
11878 		goto fail;
11879 	}
11880 
11881 	if (first_time == FALSE) {
11882 		/*
11883 		 * De-allocate buffers before re-allocating them using the
11884 		 * latest IOC facts.
11885 		 */
11886 		mptsas_hba_fini(mpt);
11887 
11888 		/*
11889 		 * Setup configuration space
11890 		 */
11891 		if (mptsas_config_space_init(mpt) == FALSE) {
11892 			mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
11893 			    "failed!");
11894 			goto fail;
11895 		}
11896 	}
11897 
11898 	/*
11899 	 * IOC facts can change after a diag reset so all buffers that are
11900 	 * based on these numbers must be de-allocated and re-allocated.  Get
11901 	 * new IOC facts each time chip is initialized.
11902 	 */
11903 	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
11904 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
11905 		goto fail;
11906 	}
11907 	/*
11908 	 * Re-allocate active slots here if not the first reset.  Since
11909 	 * m_active could have a different number of slots allocated after a
11910 	 * reset, just de-allocate the old m_active structure and re-allocate a
11911 	 * new one.  Save the tables and IR info from the old m_active.
11912 	 */
11913 	if (first_time == FALSE) {
11914 		new_active = kmem_zalloc(MPTSAS_SLOTS_SIZE(mpt), KM_SLEEP);
11915 		if (new_active == NULL) {
11916 			mptsas_log(mpt, CE_WARN, "Re-alloc of active slots "
11917 			    "failed!");
11918 			goto fail;
11919 		} else {
11920 			new_active->m_n_slots = (mpt->m_max_requests - 2);
11921 			new_active->m_size = MPTSAS_SLOTS_SIZE(mpt);
11922 			new_active->m_tags = 1;
11923 			new_active->m_tgttbl = mpt->m_active->m_tgttbl;
11924 			new_active->m_smptbl = mpt->m_active->m_smptbl;
11925 			new_active->m_num_raid_configs =
11926 			    mpt->m_active->m_num_raid_configs;
11927 			for (i = 0; i < new_active->m_num_raid_configs; i++) {
11928 				new_active->m_raidconfig[i] =
11929 				    mpt->m_active->m_raidconfig[i];
11930 			}
11931 			kmem_free(mpt->m_active, mpt->m_active->m_size);
11932 			mpt->m_active = new_active;
11933 		}
11934 	}
11935 
11936 	/*
11937 	 * Allocate request message frames, reply free queue, reply descriptor
11938 	 * post queue, and reply message frames using latest IOC facts.
11939 	 */
11940 	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
11941 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
11942 		goto fail;
11943 	}
11944 	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
11945 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
11946 		goto fail;
11947 	}
11948 	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
11949 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
11950 		goto fail;
11951 	}
11952 	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
11953 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
11954 		goto fail;
11955 	}
11956 
11957 	/*
11958 	 * Re-Initialize ioc to operational state
11959 	 */
11960 	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
11961 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
11962 		goto fail;
11963 	}
11964 
11965 	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
11966 	    mpt->m_max_replies, KM_SLEEP);
11967 
11968 	/*
11969 	 * Initialize reply post index.  Reply free index is initialized after
11970 	 * the next loop.
11971 	 */
11972 	mpt->m_post_index = 0;
11973 
11974 	/*
11975 	 * Initialize the Reply Free Queue with the physical addresses of our
11976 	 * reply frames.
11977 	 */
11978 	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
11979 	for (i = 0; i < mpt->m_max_replies; i++) {
11980 		ddi_put32(mpt->m_acc_free_queue_hdl,
11981 		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
11982 		    cookie.dmac_address);
11983 		cookie.dmac_address += mpt->m_reply_frame_size;
11984 	}
11985 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11986 	    DDI_DMA_SYNC_FORDEV);
11987 
11988 	/*
11989 	 * Initialize the reply free index to one past the last frame on the
11990 	 * queue.  This will signify that the queue is empty to start with.
11991 	 */
11992 	mpt->m_free_index = i;
11993 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
11994 
11995 	/*
11996 	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
11997 	 */
11998 	for (i = 0; i < mpt->m_post_queue_depth; i++) {
11999 		ddi_put64(mpt->m_acc_post_queue_hdl,
12000 		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
12001 		    0xFFFFFFFFFFFFFFFF);
12002 	}
12003 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
12004 	    DDI_DMA_SYNC_FORDEV);
12005 
12006 	/*
12007 	 * Enable ports
12008 	 */
12009 	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
12010 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
12011 		goto fail;
12012 	}
12013 
12014 	/*
12015 	 * Fill in the phy_info structure and get the base WWID
12016 	 */
12017 
12018 	if (first_time == TRUE) {
12019 		if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
12020 			mptsas_log(mpt, CE_WARN,
12021 			    "mptsas_get_manufacture_page5 failed!");
12022 			goto fail;
12023 		}
12024 
12025 		if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
12026 			mptsas_log(mpt, CE_WARN,
12027 			    "mptsas_get_sas_io_unit_page_hndshk failed!");
12028 			goto fail;
12029 		}
12030 
12031 		if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
12032 			mptsas_log(mpt, CE_WARN,
12033 			    "mptsas_get_manufacture_page0 failed!");
12034 			goto fail;
12035 		}
12036 	}
12037 
12038 	/*
12039 	 * enable events
12040 	 */
12041 	if (first_time == FALSE) {
12042 		if (mptsas_ioc_enable_event_notification(mpt)) {
12043 			goto fail;
12044 		}
12045 	}
12046 
12047 	/*
12048 	 * We need these checks here as well as in attach because
12049 	 * mptsas_init_chip() is called from multiple places.
12050 	 */
12051 
12052 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
12053 	    DDI_SUCCESS) ||
12054 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
12055 	    DDI_SUCCESS) ||
12056 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
12057 	    DDI_SUCCESS) ||
12058 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
12059 	    DDI_SUCCESS) ||
12060 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
12061 	    DDI_SUCCESS)) {
12062 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12063 		goto fail;
12064 	}
12065 
12066 	/* Check all acc handles */
12067 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
12068 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
12069 	    DDI_SUCCESS) ||
12070 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
12071 	    DDI_SUCCESS) ||
12072 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
12073 	    DDI_SUCCESS) ||
12074 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
12075 	    DDI_SUCCESS) ||
12076 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
12077 	    DDI_SUCCESS) ||
12078 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
12079 	    DDI_SUCCESS)) {
12080 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12081 		goto fail;
12082 	}
12083 
12084 	return (DDI_SUCCESS);
12085 
12086 fail:
12087 	return (DDI_FAILURE);
12088 }
12089 
12090 static int
12091 mptsas_init_pm(mptsas_t *mpt)
12092 {
12093 	char		pmc_name[16];
12094 	char		*pmc[] = {
12095 				NULL,
12096 				"0=Off (PCI D3 State)",
12097 				"3=On (PCI D0 State)",
12098 				NULL
12099 			};
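	/*
	 * pmc[] follows the pm-components(9P) layout: entry 0 names the
	 * component and the remaining entries list its power levels.
	 */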
12100 	uint16_t	pmcsr_stat;
12101 
12102 	/*
12103 	 * If power management is supported by this chip, create
12104 	 * pm-components property for the power management framework
12105 	 */
12106 	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12107 	pmc[0] = pmc_name;
12108 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12109 	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12110 		mpt->m_options &= ~MPTSAS_OPT_PM;
12111 		mptsas_log(mpt, CE_WARN,
12112 		    "mptsas%d: pm-component property creation failed.",
12113 		    mpt->m_instance);
12114 		return (DDI_FAILURE);
12115 	}
12116 
12117 	/*
12118 	 * Power on device.
12119 	 */
12120 	(void) pm_busy_component(mpt->m_dip, 0);
12121 	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12122 	    mpt->m_pmcsr_offset);
12123 	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12124 		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12125 		    mpt->m_instance);
12126 		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12127 		    PCI_PMCSR_D0);
12128 	}
12129 	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12130 		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12131 		return (DDI_FAILURE);
12132 	}
12133 	mpt->m_power_level = PM_LEVEL_D0;
12134 	/*
12135 	 * Set pm idle delay.
12136 	 */
12137 	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12138 	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12139 
12140 	return (DDI_SUCCESS);
12141 }
12142 
12143 /*
12144  * mptsas_add_intrs:
12145  *
12146  * Register FIXED or MSI interrupts.
12147  */
12148 static int
12149 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12150 {
12151 	dev_info_t	*dip = mpt->m_dip;
12152 	int		avail, actual, count = 0;
12153 	int		i, flag, ret;
12154 
12155 	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12156 
12157 	/* Get number of interrupts */
12158 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12159 	if ((ret != DDI_SUCCESS) || (count <= 0)) {
12160 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12161 		    "ret %d count %d\n", ret, count);
12162 
12163 		return (DDI_FAILURE);
12164 	}
12165 
12166 	/* Get number of available interrupts */
12167 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
12168 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
12169 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12170 		    "ret %d avail %d\n", ret, avail);
12171 
12172 		return (DDI_FAILURE);
12173 	}
12174 
12175 	if (avail < count) {
12176 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs() returned %d, "
12177 		    "ddi_intr_get_navail() returned %d", count, avail);
12178 	}
12179 
12180 	/* mptsas only has one interrupt routine */
12181 	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12182 		count = 1;
12183 	}
12184 
12185 	/* Allocate an array of interrupt handles */
12186 	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12187 	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12188 
12189 	flag = DDI_INTR_ALLOC_NORMAL;
12190 
12191 	/* call ddi_intr_alloc() */
12192 	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12193 	    count, &actual, flag);
12194 
12195 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
12196 		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12197 		    ret);
12198 		kmem_free(mpt->m_htable, mpt->m_intr_size);
12199 		return (DDI_FAILURE);
12200 	}
12201 
12202 	/* use interrupt count returned or abort? */
12203 	if (actual < count) {
12204 		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12205 		    count, actual);
12206 	}
12207 
12208 	mpt->m_intr_cnt = actual;
12209 
12210 	/*
12211 	 * Get priority for first msi, assume remaining are all the same
12212 	 */
12213 	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12214 	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
12215 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12216 
12217 		/* Free already allocated intr */
12218 		for (i = 0; i < actual; i++) {
12219 			(void) ddi_intr_free(mpt->m_htable[i]);
12220 		}
12221 
12222 		kmem_free(mpt->m_htable, mpt->m_intr_size);
12223 		return (DDI_FAILURE);
12224 	}
12225 
12226 	/* Test for high level mutex */
12227 	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12228 		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12229 		    "Hi level interrupt not supported\n");
12230 
12231 		/* Free already allocated intr */
12232 		for (i = 0; i < actual; i++) {
12233 			(void) ddi_intr_free(mpt->m_htable[i]);
12234 		}
12235 
12236 		kmem_free(mpt->m_htable, mpt->m_intr_size);
12237 		return (DDI_FAILURE);
12238 	}
12239 
12240 	/* Call ddi_intr_add_handler() */
12241 	for (i = 0; i < actual; i++) {
12242 		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12243 		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12244 			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12245 			    "failed %d\n", ret);
12246 
12247 			/* Free already allocated intr */
12248 			for (i = 0; i < actual; i++) {
12249 				(void) ddi_intr_free(mpt->m_htable[i]);
12250 			}
12251 
12252 			kmem_free(mpt->m_htable, mpt->m_intr_size);
12253 			return (DDI_FAILURE);
12254 		}
12255 	}
12256 
12257 	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12258 	    != DDI_SUCCESS) {
12259 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12260 
12261 		/* Free already allocated intr */
12262 		for (i = 0; i < actual; i++) {
12263 			(void) ddi_intr_free(mpt->m_htable[i]);
12264 		}
12265 
12266 		kmem_free(mpt->m_htable, mpt->m_intr_size);
12267 		return (DDI_FAILURE);
12268 	}
12269 
12270 	return (DDI_SUCCESS);
12271 }
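
/*
 * A caller is expected to enable the interrupts registered above once the
 * driver is ready to receive them.  A minimal sketch, mirroring the disable
 * logic in mptsas_rem_intrs() below (assuming m_intr_cap, m_htable and
 * m_intr_cnt were set up by mptsas_add_intrs()):
 *
 *	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
 *		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
 *	} else {
 *		for (i = 0; i < mpt->m_intr_cnt; i++)
 *			(void) ddi_intr_enable(mpt->m_htable[i]);
 *	}
 */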
12272 
12273 /*
12274  * mptsas_rem_intrs:
12275  *
12276  * Unregister FIXED or MSI interrupts
12277  */
12278 static void
12279 mptsas_rem_intrs(mptsas_t *mpt)
12280 {
12281 	int	i;
12282 
12283 	NDBG6(("mptsas_rem_intrs"));
12284 
12285 	/* Disable all interrupts */
12286 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12287 		/* Call ddi_intr_block_disable() */
12288 		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12289 	} else {
12290 		for (i = 0; i < mpt->m_intr_cnt; i++) {
12291 			(void) ddi_intr_disable(mpt->m_htable[i]);
12292 		}
12293 	}
12294 
12295 	/* Call ddi_intr_remove_handler() */
12296 	for (i = 0; i < mpt->m_intr_cnt; i++) {
12297 		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
12298 		(void) ddi_intr_free(mpt->m_htable[i]);
12299 	}
12300 
12301 	kmem_free(mpt->m_htable, mpt->m_intr_size);
12302 }
12303 
12304 /*
12305  * The IO fault service error handling callback function
12306  */
12307 /*ARGSUSED*/
12308 static int
12309 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
12310 {
12311 	/*
12312 	 * as the driver can always deal with an error in any dma or
12313 	 * access handle, we can just return the fme_status value.
12314 	 */
12315 	pci_ereport_post(dip, err, NULL);
12316 	return (err->fme_status);
12317 }
12318 
12319 /*
12320  * mptsas_fm_init - initialize fma capabilities and register with IO
12321  *               fault services.
12322  */
12323 static void
12324 mptsas_fm_init(mptsas_t *mpt)
12325 {
12326 	/*
12327 	 * Need to change iblock to priority for new MSI intr
12328 	 */
12329 	ddi_iblock_cookie_t	fm_ibc;
12330 
12331 	/* Only register with IO Fault Services if we have some capability */
12332 	if (mpt->m_fm_capabilities) {
12333 		/* Adjust access and dma attributes for FMA */
12334 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12335 		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12336 		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12337 
12338 		/*
12339 		 * Register capabilities with IO Fault Services.
12340 		 * mpt->m_fm_capabilities will be updated to indicate
12341 		 * capabilities actually supported (not requested.)
12342 		 */
12343 		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12344 
12345 		/*
12346 		 * Initialize pci ereport capabilities if ereport
12347 		 * capable (should always be.)
12348 		 */
12349 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12350 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12351 			pci_ereport_setup(mpt->m_dip);
12352 		}
12353 
12354 		/*
12355 		 * Register error callback if error callback capable.
12356 		 */
12357 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12358 			ddi_fm_handler_register(mpt->m_dip,
12359 			    mptsas_fm_error_cb, (void *) mpt);
12360 		}
12361 	}
12362 }
12363 
12364 /*
12365  * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12366  *               fault services.
12367  *
12368  */
12369 static void
12370 mptsas_fm_fini(mptsas_t *mpt)
12371 {
12372 	/* Only unregister FMA capabilities if registered */
12373 	if (mpt->m_fm_capabilities) {
12374 
12375 		/*
12376 		 * Un-register error callback if error callback capable.
12377 		 */
12378 
12379 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12380 			ddi_fm_handler_unregister(mpt->m_dip);
12381 		}
12382 
12383 		/*
12384 		 * Release any resources allocated by pci_ereport_setup()
12385 		 */
12386 
12387 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12388 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12389 			pci_ereport_teardown(mpt->m_dip);
12390 		}
12391 
12392 		/* Unregister from IO Fault Services */
12393 		ddi_fm_fini(mpt->m_dip);
12394 
12395 		/* Adjust access and dma attributes for FMA */
12396 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12397 		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12398 		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12399 
12400 	}
12401 }
12402 
12403 int
12404 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12405 {
12406 	ddi_fm_error_t	de;
12407 
12408 	if (handle == NULL)
12409 		return (DDI_FAILURE);
12410 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12411 	return (de.fme_status);
12412 }
12413 
12414 int
12415 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12416 {
12417 	ddi_fm_error_t	de;
12418 
12419 	if (handle == NULL)
12420 		return (DDI_FAILURE);
12421 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12422 	return (de.fme_status);
12423 }
12424 
12425 void
12426 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12427 {
12428 	uint64_t	ena;
12429 	char		buf[FM_MAX_CLASS];
12430 
12431 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12432 	ena = fm_ena_generate(0, FM_ENA_FMT1);
12433 	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12434 		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12435 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12436 	}
12437 }
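
/*
 * A typical usage sketch elsewhere in the driver: post a device ereport and
 * then report the service impact, e.g.
 *
 *	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
 *	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
 */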
12438 
12439 static int
12440 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12441     uint16_t *dev_handle, mptsas_target_t **pptgt)
12442 {
12443 	int		rval;
12444 	uint32_t	dev_info;
12445 	uint64_t	sas_wwn;
12446 	mptsas_phymask_t phymask;
12447 	uint8_t		physport, phynum, config, disk;
12448 	mptsas_slots_t	*slots = mpt->m_active;
12449 	uint64_t	devicename;
12450 	uint16_t	pdev_hdl;
12451 	mptsas_target_t	*tmp_tgt = NULL;
12452 	uint16_t	bay_num, enclosure;
12453 
12454 	ASSERT(*pptgt == NULL);
12455 
12456 	rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12457 	    &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12458 	    &bay_num, &enclosure);
12459 	if (rval != DDI_SUCCESS) {
12460 		rval = DEV_INFO_FAIL_PAGE0;
12461 		return (rval);
12462 	}
12463 
12464 	if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12465 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12466 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == 0) {
12467 		rval = DEV_INFO_WRONG_DEVICE_TYPE;
12468 		return (rval);
12469 	}
12470 
12471 	/*
12472 	 * Get the SATA Device Name from SAS device page0 for a SATA
12473 	 * device.  If the device name doesn't exist, set m_sas_wwn to 0
12474 	 * for direct attached SATA.  For a device behind an expander we
12475 	 * can still use the STP address assigned by the expander.
12476 	 */
12477 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12478 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12479 		mutex_exit(&mpt->m_mutex);
12480 		/* alloc a tmp_tgt to send the cmd */
12481 		tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12482 		    KM_SLEEP);
12483 		tmp_tgt->m_devhdl = *dev_handle;
12484 		tmp_tgt->m_deviceinfo = dev_info;
12485 		tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12486 		tmp_tgt->m_qfull_retry_interval =
12487 		    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12488 		tmp_tgt->m_t_throttle = MAX_THROTTLE;
12489 		devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12490 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12491 		mutex_enter(&mpt->m_mutex);
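		/*
		 * Only keep the name when the top nibble of the 64-bit WWN
		 * (the NAA field) is 5, i.e. an IEEE Registered identifier;
		 * otherwise fall through to the checks below.
		 */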
12492 		if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12493 			sas_wwn = devicename;
12494 		} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12495 			sas_wwn = 0;
12496 		}
12497 	}
12498 
12499 	/*
12500 	 * Check if the dev handle is for a Phys Disk. If so, set return value
12501 	 * and exit.  Don't add Phys Disks to hash.
12502 	 */
12503 	for (config = 0; config < slots->m_num_raid_configs; config++) {
12504 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12505 			if (*dev_handle == slots->m_raidconfig[config].
12506 			    m_physdisk_devhdl[disk]) {
12507 				rval = DEV_INFO_PHYS_DISK;
12508 				return (rval);
12509 			}
12510 		}
12511 	}
12512 
12513 	phymask = mptsas_physport_to_phymask(mpt, physport);
12514 	*pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12515 	    dev_info, phymask, phynum);
12516 	if (*pptgt == NULL) {
12517 		mptsas_log(mpt, CE_WARN, "Failed to allocate target "
12518 		    "structure!");
12519 		rval = DEV_INFO_FAIL_ALLOC;
12520 		return (rval);
12521 	}
12522 	(*pptgt)->m_enclosure = enclosure;
12523 	(*pptgt)->m_slot_num = bay_num;
12524 	return (DEV_INFO_SUCCESS);
12525 }
12526 
12527 uint64_t
12528 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12529 {
12530 	uint64_t	sata_guid = 0, *pwwn = NULL;
12531 	int		target = ptgt->m_devhdl;
12532 	uchar_t		*inq83 = NULL;
12533 	int		inq83_len = 0xFF;
12534 	uchar_t		*dblk = NULL;
12535 	int		inq83_retry = 3;
12536 	int		rval = DDI_FAILURE;
12537 
12538 	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);
12539 
12540 inq83_retry:
12541 	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12542 	    inq83_len, NULL, 1);
12543 	if (rval != DDI_SUCCESS) {
12544 		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12545 		    "0x83 for target:%x, lun:%x failed!", target, lun);
12546 		goto out;
12547 	}
12548 	/* According to SAT2, the first descriptor is the logical unit name */
12549 	dblk = &inq83[4];
12550 	if ((dblk[1] & 0x30) != 0) {
12551 		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12552 		goto out;
12553 	}
12554 	pwwn = (uint64_t *)(void *)(&dblk[4]);
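	/*
	 * dblk[0..3] is the designation descriptor header and dblk[4] is the
	 * first identifier byte: a top nibble of 5 is an NAA IEEE Registered
	 * WWN, while an ASCII 'A' presumably marks a T10 vendor-ID based
	 * designator ("ATA..."), i.e. no NAA format GUID is available.
	 */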
12555 	if ((dblk[4] & 0xf0) == 0x50) {
12556 		sata_guid = BE_64(*pwwn);
12557 		goto out;
12558 	} else if (dblk[4] == 'A') {
12559 		NDBG20(("SATA drive has no NAA format GUID."));
12560 		goto out;
12561 	} else {
12562 		/* The data is not ready, wait and retry */
12563 		inq83_retry--;
12564 		if (inq83_retry <= 0) {
12565 			goto out;
12566 		}
12567 		NDBG20(("The GUID is not ready, retry..."));
12568 		delay(1 * drv_usectohz(1000000));
12569 		goto inq83_retry;
12570 	}
12571 out:
12572 	kmem_free(inq83, inq83_len);
12573 	return (sata_guid);
12574 }
12575 
12576 static int
12577 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12578     unsigned char *buf, int len, int *reallen, uchar_t evpd)
12579 {
12580 	uchar_t			cdb[CDB_GROUP0];
12581 	struct scsi_address	ap;
12582 	struct buf		*data_bp = NULL;
12583 	int			resid = 0;
12584 	int			ret = DDI_FAILURE;
12585 
12586 	ASSERT(len <= 0xffff);
12587 
12588 	ap.a_target = MPTSAS_INVALID_DEVHDL;
12589 	ap.a_lun = (uchar_t)(lun);
12590 	ap.a_hba_tran = mpt->m_tran;
12591 
12592 	data_bp = scsi_alloc_consistent_buf(&ap,
12593 	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12594 	if (data_bp == NULL) {
12595 		return (ret);
12596 	}
12597 	bzero(cdb, CDB_GROUP0);
12598 	cdb[0] = SCMD_INQUIRY;
12599 	cdb[1] = evpd;
12600 	cdb[2] = page;
12601 	cdb[3] = (len & 0xff00) >> 8;
12602 	cdb[4] = (len & 0x00ff);
12603 	cdb[5] = 0;
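	/*
	 * cdb[3]/cdb[4] carry the 16-bit allocation length, MSB first;
	 * e.g. len = 0xFF yields cdb[3] = 0x00 and cdb[4] = 0xFF.
	 */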
12604 
12605 	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12606 	    &resid);
12607 	if (ret == DDI_SUCCESS) {
12608 		if (reallen) {
12609 			*reallen = len - resid;
12610 		}
12611 		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12612 	}
12613 	if (data_bp) {
12614 		scsi_free_consistent_buf(data_bp);
12615 	}
12616 	return (ret);
12617 }
12618 
12619 static int
12620 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
12621     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
12622     int *resid)
12623 {
12624 	struct scsi_pkt		*pktp = NULL;
12625 	scsi_hba_tran_t		*tran_clone = NULL;
12626 	mptsas_tgt_private_t	*tgt_private = NULL;
12627 	int			ret = DDI_FAILURE;
12628 
12629 	/*
12630 	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
12631 	 * information to scsi_init_pkt, so allocate a scsi_hba_tran
12632 	 * clone to simulate the commands coming from sd.
12633 	 */
12634 	tran_clone = kmem_alloc(
12635 	    sizeof (scsi_hba_tran_t), KM_SLEEP);
12636 	if (tran_clone == NULL) {
12637 		goto out;
12638 	}
12639 	bcopy((caddr_t)mpt->m_tran,
12640 	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
12641 	tgt_private = kmem_alloc(
12642 	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
12643 	if (tgt_private == NULL) {
12644 		goto out;
12645 	}
12646 	tgt_private->t_lun = ap->a_lun;
12647 	tgt_private->t_private = ptgt;
12648 	tran_clone->tran_tgt_private = tgt_private;
12649 	ap->a_hba_tran = tran_clone;
12650 
12651 	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
12652 	    data_bp, cdblen, sizeof (struct scsi_arq_status),
12653 	    0, PKT_CONSISTENT, NULL, NULL);
12654 	if (pktp == NULL) {
12655 		goto out;
12656 	}
12657 	bcopy(cdb, pktp->pkt_cdbp, cdblen);
12658 	pktp->pkt_flags = FLAG_NOPARITY;
12659 	if (scsi_poll(pktp) < 0) {
12660 		goto out;
12661 	}
12662 	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
12663 		goto out;
12664 	}
12665 	if (resid != NULL) {
12666 		*resid = pktp->pkt_resid;
12667 	}
12668 
12669 	ret = DDI_SUCCESS;
12670 out:
12671 	if (pktp) {
12672 		scsi_destroy_pkt(pktp);
12673 	}
12674 	if (tran_clone) {
12675 		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
12676 	}
12677 	if (tgt_private) {
12678 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
12679 	}
12680 	return (ret);
12681 }
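
/*
 * mptsas_parse_address() parses a unit-address string of the form
 * "w<SAS address>,<lun>" or "p<phy>,<lun>", with the WWN, phy and LUN all
 * in hex.  For example (made-up addresses):
 *
 *	"w5000c5000270e965,0"	wwid = 0x5000c5000270e965, lun = 0
 *	"p3,0"			phy = 3, lun = 0
 */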
12682 static int
12683 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
12684 {
12685 	char	*cp = NULL;
12686 	char	*ptr = NULL;
12687 	size_t	s = 0;
12688 	char	*wwid_str = NULL;
12689 	char	*lun_str = NULL;
12690 	long	lunnum;
12691 	long	phyid = -1;
12692 	int	rc = DDI_FAILURE;
12693 
12694 	ptr = name;
12695 	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
12696 	ptr++;
12697 	if ((cp = strchr(ptr, ',')) == NULL) {
12698 		return (DDI_FAILURE);
12699 	}
12700 
12701 	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12702 	s = (uintptr_t)cp - (uintptr_t)ptr;
12703 
12704 	bcopy(ptr, wwid_str, s);
12705 	wwid_str[s] = '\0';
12706 
12707 	ptr = ++cp;
12708 
12709 	if ((cp = strchr(ptr, '\0')) == NULL) {
12710 		goto out;
12711 	}
12712 	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12713 	s = (uintptr_t)cp - (uintptr_t)ptr;
12714 
12715 	bcopy(ptr, lun_str, s);
12716 	lun_str[s] = '\0';
12717 
12718 	if (name[0] == 'p') {
12719 		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
12720 	} else {
12721 		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
12722 	}
12723 	if (rc != DDI_SUCCESS)
12724 		goto out;
12725 
12726 	if (phyid != -1) {
12727 		ASSERT(phyid < MPTSAS_MAX_PHYS);
12728 		*phy = (uint8_t)phyid;
12729 	}
12730 	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
12731 	if (rc != 0)
12732 		goto out;
12733 
12734 	*lun = (int)lunnum;
12735 	rc = DDI_SUCCESS;
12736 out:
12737 	if (wwid_str)
12738 		kmem_free(wwid_str, SCSI_MAXNAMELEN);
12739 	if (lun_str)
12740 		kmem_free(lun_str, SCSI_MAXNAMELEN);
12741 
12742 	return (rc);
12743 }
12744 
12745 /*
12746  * mptsas_parse_smp_name() parses a SAS WWN string of the
12747  * form "wWWN".
12748  */
12749 static int
12750 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12751 {
12752 	char	*ptr = name;
12753 
12754 	if (*ptr != 'w') {
12755 		return (DDI_FAILURE);
12756 	}
12757 
12758 	ptr++;
12759 	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12760 		return (DDI_FAILURE);
12761 	}
12762 	return (DDI_SUCCESS);
12763 }
12764 
12765 static int
12766 mptsas_bus_config(dev_info_t *pdip, uint_t flag,
12767     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
12768 {
12769 	int		ret = NDI_FAILURE;
12770 	int		circ = 0;
12771 	int		circ1 = 0;
12772 	mptsas_t	*mpt;
12773 	char		*ptr = NULL;
12774 	char		*devnm = NULL;
12775 	uint64_t	wwid = 0;
12776 	uint8_t		phy = 0xFF;
12777 	int		lun = 0;
12778 	uint_t		mflags = flag;
12779 	int		bconfig = TRUE;
12780 
12781 	if (scsi_hba_iport_unit_address(pdip) == 0) {
12782 		return (DDI_FAILURE);
12783 	}
12784 
12785 	mpt = DIP2MPT(pdip);
12786 	if (!mpt) {
12787 		return (DDI_FAILURE);
12788 	}
12789 	/*
12790 	 * Hold the nexus across the bus_config
12791 	 */
12792 	ndi_devi_enter(scsi_vhci_dip, &circ);
12793 	ndi_devi_enter(pdip, &circ1);
12794 	switch (op) {
12795 	case BUS_CONFIG_ONE:
12796 		/* parse wwid/target name out of name given */
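		/*
		 * For BUS_CONFIG_ONE, arg is a "name@address" string, e.g.
		 * (made-up addresses) "disk@w5000c5000270e965,0",
		 * "disk@p3,0" or "smp@w5000c50002345678".
		 */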
12797 		if ((ptr = strchr((char *)arg, '@')) == NULL) {
12798 			ret = NDI_FAILURE;
12799 			break;
12800 		}
12801 		ptr++;
12802 		if (strncmp((char *)arg, "smp", 3) == 0) {
12803 			/*
12804 			 * This is an SMP target device
12805 			 */
12806 			ret = mptsas_parse_smp_name(ptr, &wwid);
12807 			if (ret != DDI_SUCCESS) {
12808 				ret = NDI_FAILURE;
12809 				break;
12810 			}
12811 			ret = mptsas_config_smp(pdip, wwid, childp);
12812 		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
12813 			/*
12814 			 * OBP could pass down a non-canonical form of the
12815 			 * bootpath without the LUN part when the LUN is 0,
12816 			 * so the driver needs to adjust the string.
12817 			 */
12818 			if (strchr(ptr, ',') == NULL) {
12819 				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12820 				(void) sprintf(devnm, "%s,0", (char *)arg);
12821 				ptr = strchr(devnm, '@');
12822 				ptr++;
12823 			}
12824 
12825 			/*
12826 			 * The device path is in wWWID format and the device
12827 			 * is not an SMP target device.
12828 			 */
12829 			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
12830 			if (ret != DDI_SUCCESS) {
12831 				ret = NDI_FAILURE;
12832 				break;
12833 			}
12834 			*childp = NULL;
12835 			if (ptr[0] == 'w') {
12836 				ret = mptsas_config_one_addr(pdip, wwid,
12837 				    lun, childp);
12838 			} else if (ptr[0] == 'p') {
12839 				ret = mptsas_config_one_phy(pdip, phy, lun,
12840 				    childp);
12841 			}
12842 
12843 			/*
12844 			 * If this is a CD/DVD device in the OBP path,
12845 			 * ndi_busop_bus_config() can be skipped since the
12846 			 * config-one operation is done above.
12847 			 */
12848 			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
12849 			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
12850 			    (strncmp((char *)arg, "disk", 4) == 0)) {
12851 				bconfig = FALSE;
12852 				ndi_hold_devi(*childp);
12853 			}
12854 		} else {
12855 			ret = NDI_FAILURE;
12856 			break;
12857 		}
12858 
12859 		/*
12860 		 * DDI group instructed us to use this flag.
12861 		 */
12862 		mflags |= NDI_MDI_FALLBACK;
12863 		break;
12864 	case BUS_CONFIG_DRIVER:
12865 	case BUS_CONFIG_ALL:
12866 		mptsas_config_all(pdip);
12867 		ret = NDI_SUCCESS;
12868 		break;
12869 	}
12870 
12871 	if ((ret == NDI_SUCCESS) && bconfig) {
12872 		ret = ndi_busop_bus_config(pdip, mflags, op,
12873 		    (devnm == NULL) ? arg : devnm, childp, 0);
12874 	}
12875 
12876 	ndi_devi_exit(pdip, circ1);
12877 	ndi_devi_exit(scsi_vhci_dip, circ);
12878 	if (devnm != NULL)
12879 		kmem_free(devnm, SCSI_MAXNAMELEN);
12880 	return (ret);
12881 }
12882 
12883 static int
12884 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12885     mptsas_target_t *ptgt)
12886 {
12887 	int			rval = DDI_FAILURE;
12888 	struct scsi_inquiry	*sd_inq = NULL;
12889 	mptsas_t		*mpt = DIP2MPT(pdip);
12890 
12891 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12892 
12893 	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12894 	    SUN_INQSIZE, 0, (uchar_t)0);
12895 
12896 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12897 		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12898 	} else {
12899 		rval = DDI_FAILURE;
12900 	}
12901 
12902 	kmem_free(sd_inq, SUN_INQSIZE);
12903 	return (rval);
12904 }
12905 
12906 static int
12907 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
12908     dev_info_t **lundip)
12909 {
12910 	int		rval;
12911 	mptsas_t		*mpt = DIP2MPT(pdip);
12912 	int		phymask;
12913 	mptsas_target_t	*ptgt = NULL;
12914 
12915 	/*
12916 	 * Get the physical port associated with the iport
12917 	 */
12918 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12919 	    "phymask", 0);
12920 
12921 	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
12922 	if (ptgt == NULL) {
12923 		/*
12924 		 * The search didn't match any device.
12925 		 */
12926 		return (DDI_FAILURE);
12927 	}
12928 	/*
12929 	 * If the LUN already exists and the status is online,
12930 	 * we just return the pointer to dev_info_t directly.
12931 	 * For the mdi_pathinfo node, we'll handle it in
12932 	 * mptsas_create_virt_lun()
12933 	 * TODO: this should also be handled in mptsas_handle_dr
12934 	 */
12935 
12936 	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
12937 	if (*lundip != NULL) {
12938 		/*
12939 		 * TODO Another senario is, we hotplug the same disk
12940 		 * TODO: Another scenario is that we hotplug the same disk
12941 		 * into the same slot and the devhdl changes; is this
12942 		 * possible?
12943 		 */
12944 		if (sasaddr != ptgt->m_sas_wwn) {
12945 			/*
12946 			 * The device has changed although the devhdl is the
12947 			 * same (Enclosure mapping mode, change drive on the
12948 			 * same slot)
12949 			 */
12950 			return (DDI_FAILURE);
12951 		}
12952 		return (DDI_SUCCESS);
12953 	}
12954 
12955 	if (phymask == 0) {
12956 		/*
12957 		 * Configure IR volume
12958 		 */
12959 		rval =  mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
12960 		return (rval);
12961 	}
12962 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12963 
12964 	return (rval);
12965 }
12966 
12967 static int
12968 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12969     dev_info_t **lundip)
12970 {
12971 	int		rval;
12972 	mptsas_t	*mpt = DIP2MPT(pdip);
12973 	int		phymask;
12974 	mptsas_target_t	*ptgt = NULL;
12975 
12976 	/*
12977 	 * Get the physical port associated with the iport
12978 	 */
12979 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12980 	    "phymask", 0);
12981 
12982 	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12983 	if (ptgt == NULL) {
12984 		/*
12985 		 * The search didn't match any device.
12986 		 */
12987 		return (DDI_FAILURE);
12988 	}
12989 
12990 	/*
12991 	 * If the LUN already exists and the status is online,
12992 	 * we just return the pointer to dev_info_t directly.
12993 	 * For the mdi_pathinfo node, we'll handle it in
12994 	 * mptsas_create_virt_lun().
12995 	 */
12996 
12997 	*lundip = mptsas_find_child_phy(pdip, phy);
12998 	if (*lundip != NULL) {
12999 		return (DDI_SUCCESS);
13000 	}
13001 
13002 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13003 
13004 	return (rval);
13005 }
13006 
13007 static int
13008 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13009     uint8_t *lun_addr_type)
13010 {
13011 	uint32_t	lun_idx = 0;
13012 
13013 	ASSERT(lun_num != NULL);
13014 	ASSERT(lun_addr_type != NULL);
13015 
13016 	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
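	/*
	 * REPORT LUNS data starts with an 8-byte header (hence the "+ 1"
	 * entry offset above), followed by 8-byte LUN entries.  For a flat
	 * space entry whose first two bytes are, say, 0x40 0x05, the
	 * address method below is 01b and the decoded lun_num is 5.
	 */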
13017 	/* determine report luns addressing type */
13018 	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13019 		/*
13020 		 * Vendors in the field have been found to be concatenating
13021 		 * bus/target/lun to equal the complete lun value instead
13022 		 * of switching to flat space addressing
13023 		 */
13024 		/* 00b - peripheral device addressing method */
13025 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13026 		/* FALLTHRU */
13027 		/* 10b - logical unit addressing method */
13028 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13029 		/* FALLTHRU */
13030 		/* 01b - flat space addressing method */
13031 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13032 		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13033 		*lun_addr_type = (buf[lun_idx] &
13034 		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13035 		*lun_num = (buf[lun_idx] & 0x3F) << 8;
13036 		*lun_num |= buf[lun_idx + 1];
13037 		return (DDI_SUCCESS);
13038 	default:
13039 		return (DDI_FAILURE);
13040 	}
13041 }
13042 
13043 static int
13044 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13045 {
13046 	struct buf		*repluns_bp = NULL;
13047 	struct scsi_address	ap;
13048 	uchar_t			cdb[CDB_GROUP5];
13049 	int			ret = DDI_FAILURE;
13050 	int			retry = 0;
13051 	int			lun_list_len = 0;
13052 	uint16_t		lun_num = 0;
13053 	uint8_t			lun_addr_type = 0;
13054 	uint32_t		lun_cnt = 0;
13055 	uint32_t		lun_total = 0;
13056 	dev_info_t		*cdip = NULL;
13057 	uint16_t		*saved_repluns = NULL;
13058 	char			*buffer = NULL;
13059 	int			buf_len = 128;
13060 	mptsas_t		*mpt = DIP2MPT(pdip);
13061 	uint64_t		sas_wwn = 0;
13062 	uint8_t			phy = 0xFF;
13063 	uint32_t		dev_info = 0;
13064 
13065 	mutex_enter(&mpt->m_mutex);
13066 	sas_wwn = ptgt->m_sas_wwn;
13067 	phy = ptgt->m_phynum;
13068 	dev_info = ptgt->m_deviceinfo;
13069 	mutex_exit(&mpt->m_mutex);
13070 
13071 	if (sas_wwn == 0) {
13072 		/*
13073 		 * It's a SATA device without a Device Name,
13074 		 * so don't try multiple LUNs.
13075 		 */
13076 		if (mptsas_find_child_phy(pdip, phy)) {
13077 			return (DDI_SUCCESS);
13078 		} else {
13079 			/*
13080 			 * need to configure and create the node
13081 			 */
13082 			return (DDI_FAILURE);
13083 		}
13084 	}
13085 
13086 	/*
13087 	 * WWN (SAS address or Device Name exist)
13088 	 */
13089 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13090 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13091 		/*
13092 		 * A SATA device with a Device Name,
13093 		 * so don't try multiple LUNs.
13094 		 */
13095 		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13096 			return (DDI_SUCCESS);
13097 		} else {
13098 			return (DDI_FAILURE);
13099 		}
13100 	}
13101 
13102 	do {
13103 		ap.a_target = MPTSAS_INVALID_DEVHDL;
13104 		ap.a_lun = 0;
13105 		ap.a_hba_tran = mpt->m_tran;
13106 		repluns_bp = scsi_alloc_consistent_buf(&ap,
13107 		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13108 		if (repluns_bp == NULL) {
13109 			retry++;
13110 			continue;
13111 		}
13112 		bzero(cdb, CDB_GROUP5);
13113 		cdb[0] = SCMD_REPORT_LUNS;
13114 		cdb[6] = (buf_len & 0xff000000) >> 24;
13115 		cdb[7] = (buf_len & 0x00ff0000) >> 16;
13116 		cdb[8] = (buf_len & 0x0000ff00) >> 8;
13117 		cdb[9] = (buf_len & 0x000000ff);
13118 
13119 		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13120 		    repluns_bp, NULL);
13121 		if (ret != DDI_SUCCESS) {
13122 			scsi_free_consistent_buf(repluns_bp);
13123 			retry++;
13124 			continue;
13125 		}
13126 		lun_list_len = BE_32(*(int *)((void *)(
13127 		    repluns_bp->b_un.b_addr)));
13128 		if (buf_len >= lun_list_len + 8) {
13129 			ret = DDI_SUCCESS;
13130 			break;
13131 		}
13132 		scsi_free_consistent_buf(repluns_bp);
13133 		buf_len = lun_list_len + 8;
13134 
13135 	} while (retry < 3);
13136 
13137 	if (ret != DDI_SUCCESS)
13138 		return (ret);
13139 	buffer = (char *)repluns_bp->b_un.b_addr;
13140 	/*
13141 	 * find out the number of luns returned by the SCSI ReportLun call
13142 	 * and allocate buffer space
13143 	 */
13144 	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13145 	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13146 	if (saved_repluns == NULL) {
13147 		scsi_free_consistent_buf(repluns_bp);
13148 		return (DDI_FAILURE);
13149 	}
13150 	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13151 		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13152 		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13153 			continue;
13154 		}
13155 		saved_repluns[lun_cnt] = lun_num;
13156 		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn,
		    lun_num)) != NULL)
13157 			ret = DDI_SUCCESS;
13158 		else
13159 			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13160 			    ptgt);
13161 		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13162 			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13163 			    MPTSAS_DEV_GONE);
13164 		}
13165 	}
13166 	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13167 	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13168 	scsi_free_consistent_buf(repluns_bp);
13169 	return (DDI_SUCCESS);
13170 }
13171 
13172 static int
13173 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13174 {
13175 	int			rval = DDI_FAILURE;
13176 	struct scsi_inquiry	*sd_inq = NULL;
13177 	mptsas_t		*mpt = DIP2MPT(pdip);
13178 	mptsas_target_t		*ptgt = NULL;
13179 
13180 	mutex_enter(&mpt->m_mutex);
13181 	ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13182 	mutex_exit(&mpt->m_mutex);
13183 	if (ptgt == NULL) {
13184 		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13185 		    "not found.", target);
13186 		return (rval);
13187 	}
13188 
13189 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13190 	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13191 	    SUN_INQSIZE, 0, (uchar_t)0);
13192 
13193 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13194 		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13195 		    0);
13196 	} else {
13197 		rval = DDI_FAILURE;
13198 	}
13199 
13200 	kmem_free(sd_inq, SUN_INQSIZE);
13201 	return (rval);
13202 }
13203 
13204 /*
13205  * configure all RAID volumes for virtual iport
13206  */
13207 static void
13208 mptsas_config_all_viport(dev_info_t *pdip)
13209 {
13210 	mptsas_t	*mpt = DIP2MPT(pdip);
13211 	int		config, vol;
13212 	int		target;
13213 	dev_info_t	*lundip = NULL;
13214 	mptsas_slots_t	*slots = mpt->m_active;
13215 
13216 	/*
13217 	 * Get latest RAID info and search for any Volume DevHandles.  If any
13218 	 * are found, configure the volume.
13219 	 */
13220 	mutex_enter(&mpt->m_mutex);
13221 	for (config = 0; config < slots->m_num_raid_configs; config++) {
13222 		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13223 			if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13224 			    == 1) {
13225 				target = slots->m_raidconfig[config].
13226 				    m_raidvol[vol].m_raidhandle;
13227 				mutex_exit(&mpt->m_mutex);
13228 				(void) mptsas_config_raid(pdip, target,
13229 				    &lundip);
13230 				mutex_enter(&mpt->m_mutex);
13231 			}
13232 		}
13233 	}
13234 	mutex_exit(&mpt->m_mutex);
13235 }
13236 
13237 static void
13238 mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
13239     int lun_cnt, mptsas_target_t *ptgt)
13240 {
13241 	dev_info_t	*child = NULL, *savechild = NULL;
13242 	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
13243 	uint64_t	sas_wwn, wwid;
13244 	uint8_t		phy;
13245 	int		lun;
13246 	int		i;
13247 	int		find;
13248 	char		*addr;
13249 	char		*nodename;
13250 	mptsas_t	*mpt = DIP2MPT(pdip);
13251 
13252 	mutex_enter(&mpt->m_mutex);
13253 	wwid = ptgt->m_sas_wwn;
13254 	mutex_exit(&mpt->m_mutex);
13255 
13256 	child = ddi_get_child(pdip);
13257 	while (child) {
13258 		find = 0;
13259 		savechild = child;
13260 		child = ddi_get_next_sibling(child);
13261 
13262 		nodename = ddi_node_name(savechild);
13263 		if (strcmp(nodename, "smp") == 0) {
13264 			continue;
13265 		}
13266 
13267 		addr = ddi_get_name_addr(savechild);
13268 		if (addr == NULL) {
13269 			continue;
13270 		}
13271 
13272 		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
13273 		    DDI_SUCCESS) {
13274 			continue;
13275 		}
13276 
13277 		if (wwid == sas_wwn) {
13278 			for (i = 0; i < lun_cnt; i++) {
13279 				if (repluns[i] == lun) {
13280 					find = 1;
13281 					break;
13282 				}
13283 			}
13284 		} else {
13285 			continue;
13286 		}
13287 		if (find == 0) {
13288 			/*
13289 			 * The LUN is no longer reported, so offline it.
13290 			 */
13291 			(void) mptsas_offline_lun(pdip, savechild, NULL,
13292 			    NDI_DEVI_REMOVE);
13293 		}
13294 	}
13295 
13296 	pip = mdi_get_next_client_path(pdip, NULL);
13297 	while (pip) {
13298 		find = 0;
13299 		savepip = pip;
13300 		addr = MDI_PI(pip)->pi_addr;
13301 
13302 		pip = mdi_get_next_client_path(pdip, pip);
13303 
13304 		if (addr == NULL) {
13305 			continue;
13306 		}
13307 
13308 		if (mptsas_parse_address(addr, &sas_wwn, &phy,
13309 		    &lun) != DDI_SUCCESS) {
13310 			continue;
13311 		}
13312 
13313 		if (sas_wwn == wwid) {
13314 			for (i = 0; i < lun_cnt; i++) {
13315 				if (repluns[i] == lun) {
13316 					find = 1;
13317 					break;
13318 				}
13319 			}
13320 		} else {
13321 			continue;
13322 		}
13323 
13324 		if (find == 0) {
13325 			/*
13326 			 * The LUN is no longer reported, so offline it.
13327 			 */
13328 			(void) mptsas_offline_lun(pdip, NULL, savepip,
13329 			    NDI_DEVI_REMOVE);
13330 		}
13331 	}
13332 }
13333 
13334 void
13335 mptsas_update_hashtab(struct mptsas *mpt)
13336 {
13337 	uint32_t	page_address;
13338 	int		rval = 0;
13339 	uint16_t	dev_handle;
13340 	mptsas_target_t	*ptgt = NULL;
13341 	mptsas_smp_t	smp_node;
13342 
13343 	/*
13344 	 * Get latest RAID info.
13345 	 */
13346 	(void) mptsas_get_raid_info(mpt);
13347 
13348 	dev_handle = mpt->m_smp_devhdl;
13349 	for (; mpt->m_done_traverse_smp == 0; ) {
13350 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13351 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13352 		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13353 		    != DDI_SUCCESS) {
13354 			break;
13355 		}
13356 		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13357 		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13358 	}
13359 
13360 	/*
13361 	 * Config target devices
13362 	 */
13363 	dev_handle = mpt->m_dev_handle;
13364 
13365 	/*
13366 	 * Loop to get SAS device page 0 by GetNextHandle until the
13367 	 * last handle.  If the SAS device is a SATA/SSP target,
13368 	 * we try to config it.
13369 	 */
13370 	for (; mpt->m_done_traverse_dev == 0; ) {
13371 		ptgt = NULL;
13372 		page_address =
13373 		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13374 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13375 		    (uint32_t)dev_handle;
13376 		rval = mptsas_get_target_device_info(mpt, page_address,
13377 		    &dev_handle, &ptgt);
13378 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
13379 		    (rval == DEV_INFO_FAIL_ALLOC)) {
13380 			break;
13381 		}
13382 
13383 		mpt->m_dev_handle = dev_handle;
13384 	}
13385 
13386 }
13387 
13388 void
13389 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13390 {
13391 	mptsas_hash_data_t *data;
13392 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13393 	while (data != NULL) {
13394 		data->devhdl = MPTSAS_INVALID_DEVHDL;
13395 		data->device_info = 0;
13396 		/*
13397 		 * For tgttbl, clear dr_flag.
13398 		 */
13399 		data->dr_flag = MPTSAS_DR_INACTIVE;
13400 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13401 	}
13402 }
13403 
13404 void
13405 mptsas_update_driver_data(struct mptsas *mpt)
13406 {
13407 	/*
13408 	 * TODO: after hard reset, update the driver data structures
13409 	 * 1. update port/phymask mapping table mpt->m_phy_info
13410 	 * 2. invalidate all the entries in hash table
13411 	 *    m_devhdl = 0xffff and m_deviceinfo = 0
13412 	 * 3. call sas_device_page/expander_page to update hash table
13413 	 */
13414 	mptsas_update_phymask(mpt);
13415 	/*
13416 	 * Invalidate the existing entries
13417 	 */
13418 	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
13419 	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
13420 	mpt->m_done_traverse_dev = 0;
13421 	mpt->m_done_traverse_smp = 0;
13422 	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
13423 	mptsas_update_hashtab(mpt);
13424 }
13425 
13426 static void
13427 mptsas_config_all(dev_info_t *pdip)
13428 {
13429 	dev_info_t	*smpdip = NULL;
13430 	mptsas_t	*mpt = DIP2MPT(pdip);
13431 	int		phymask = 0;
13432 	mptsas_phymask_t phy_mask;
13433 	mptsas_target_t	*ptgt = NULL;
13434 	mptsas_smp_t	*psmp;
13435 
13436 	/*
13437 	 * Get the phymask associated with the iport
13438 	 */
13439 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13440 	    "phymask", 0);
13441 
13442 	/*
13443 	 * Enumerate RAID volumes here (phymask == 0).
13444 	 */
13445 	if (phymask == 0) {
13446 		mptsas_config_all_viport(pdip);
13447 		return;
13448 	}
13449 
13450 	mutex_enter(&mpt->m_mutex);
13451 
13452 	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
13453 		mptsas_update_hashtab(mpt);
13454 	}
13455 
13456 	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
13457 	    MPTSAS_HASH_FIRST);
13458 	while (psmp != NULL) {
13459 		phy_mask = psmp->m_phymask;
13460 		if (phy_mask == phymask) {
13461 			smpdip = NULL;
13462 			mutex_exit(&mpt->m_mutex);
13463 			(void) mptsas_online_smp(pdip, psmp, &smpdip);
13464 			mutex_enter(&mpt->m_mutex);
13465 		}
13466 		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
13467 		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
13468 	}
13469 
13470 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
13471 	    MPTSAS_HASH_FIRST);
13472 	while (ptgt != NULL) {
13473 		phy_mask = ptgt->m_phymask;
13474 		if (phy_mask == phymask) {
13475 			mutex_exit(&mpt->m_mutex);
13476 			(void) mptsas_config_target(pdip, ptgt);
13477 			mutex_enter(&mpt->m_mutex);
13478 		}
13479 
13480 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
13481 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
13482 	}
13483 	mutex_exit(&mpt->m_mutex);
13484 }
13485 
13486 static int
13487 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13488 {
13489 	int		rval = DDI_FAILURE;
13490 	dev_info_t	*tdip;
13491 
13492 	rval = mptsas_config_luns(pdip, ptgt);
13493 	if (rval != DDI_SUCCESS) {
13494 		/*
13495 		 * The return value means the SCMD_REPORT_LUNS
13496 		 * did not execute successfully.  The target may not
13497 		 * support such a command.
13498 		 */
13499 		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13500 	}
13501 	return (rval);
13502 }
13503 
13504 /*
13505  * Return failure if not all of the children/paths are freed.
13506  * If there is any path under the HBA, the return value will always be
13507  * failure because we do not call mdi_pi_free() for the path.
13508  */
13509 static int
13510 mptsas_offline_target(dev_info_t *pdip, char *name)
13511 {
13512 	dev_info_t		*child = NULL, *prechild = NULL;
13513 	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
13514 	int			tmp_rval, rval = DDI_SUCCESS;
13515 	char			*addr, *cp;
13516 	size_t			s;
13517 	mptsas_t		*mpt = DIP2MPT(pdip);
13518 
13519 	child = ddi_get_child(pdip);
13520 	while (child) {
13521 		addr = ddi_get_name_addr(child);
13522 		prechild = child;
13523 		child = ddi_get_next_sibling(child);
13524 
13525 		if (addr == NULL) {
13526 			continue;
13527 		}
13528 		if ((cp = strchr(addr, ',')) == NULL) {
13529 			continue;
13530 		}
13531 
13532 		s = (uintptr_t)cp - (uintptr_t)addr;
13533 
13534 		if (strncmp(addr, name, s) != 0) {
13535 			continue;
13536 		}
13537 
13538 		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13539 		    NDI_DEVI_REMOVE);
13540 		if (tmp_rval != DDI_SUCCESS) {
13541 			rval = DDI_FAILURE;
13542 			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13543 			    prechild, MPTSAS_DEV_GONE) !=
13544 			    DDI_PROP_SUCCESS) {
13545 				mptsas_log(mpt, CE_WARN, "mptsas driver "
13546 				    "unable to create property for "
13547 				    "SAS %s (MPTSAS_DEV_GONE)", addr);
13548 			}
13549 		}
13550 	}
13551 
13552 	pip = mdi_get_next_client_path(pdip, NULL);
13553 	while (pip) {
13554 		addr = MDI_PI(pip)->pi_addr;
13555 		savepip = pip;
13556 		pip = mdi_get_next_client_path(pdip, pip);
13557 		if (addr == NULL) {
13558 			continue;
13559 		}
13560 
13561 		if ((cp = strchr(addr, ',')) == NULL) {
13562 			continue;
13563 		}
13564 
13565 		s = (uintptr_t)cp - (uintptr_t)addr;
13566 
13567 		if (strncmp(addr, name, s) != 0) {
13568 			continue;
13569 		}
13570 
13571 		(void) mptsas_offline_lun(pdip, NULL, savepip,
13572 		    NDI_DEVI_REMOVE);
13573 		/*
13574 		 * The driver does not invoke mdi_pi_free(), so the path
13575 		 * will never be freed; return DDI_FAILURE.
13576 		 */
13577 		rval = DDI_FAILURE;
13578 	}
13579 	return (rval);
13580 }
13581 
13582 static int
13583 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13584     mdi_pathinfo_t *rpip, uint_t flags)
13585 {
13586 	int		rval = DDI_FAILURE;
13587 	char		*devname;
13588 	dev_info_t	*cdip, *parent;
13589 
13590 	if (rpip != NULL) {
13591 		parent = scsi_vhci_dip;
13592 		cdip = mdi_pi_get_client(rpip);
13593 	} else if (rdip != NULL) {
13594 		parent = pdip;
13595 		cdip = rdip;
13596 	} else {
13597 		return (DDI_FAILURE);
13598 	}
13599 
13600 	/*
13601 	 * Make sure the node is attached, otherwise
13602 	 * it won't have related cache nodes to
13603 	 * clean up.  i_ddi_devi_attached() is
13604 	 * similar to i_ddi_node_state(cdip) >=
13605 	 * DS_ATTACHED.
13606 	 */
13607 	if (i_ddi_devi_attached(cdip)) {
13608 
13609 		/* Get full devname */
13610 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13611 		(void) ddi_deviname(cdip, devname);
13612 		/* Clean cache */
13613 		(void) devfs_clean(parent, devname + 1,
13614 		    DV_CLEAN_FORCE);
13615 		kmem_free(devname, MAXNAMELEN + 1);
13616 	}
13617 	if (rpip != NULL) {
13618 		if (MDI_PI_IS_OFFLINE(rpip)) {
13619 			rval = DDI_SUCCESS;
13620 		} else {
13621 			rval = mdi_pi_offline(rpip, 0);
13622 		}
13623 	} else {
13624 		rval = ndi_devi_offline(cdip, flags);
13625 	}
13626 
13627 	return (rval);
13628 }
13629 
13630 static dev_info_t *
13631 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13632 {
13633 	dev_info_t	*child = NULL;
13634 	char		*smp_wwn = NULL;
13635 
13636 	child = ddi_get_child(parent);
13637 	while (child) {
13638 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13639 		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13640 		    != DDI_SUCCESS) {
13641 			child = ddi_get_next_sibling(child);
13642 			continue;
13643 		}
13644 
13645 		if (strcmp(smp_wwn, str_wwn) == 0) {
13646 			ddi_prop_free(smp_wwn);
13647 			break;
13648 		}
13649 		child = ddi_get_next_sibling(child);
13650 		ddi_prop_free(smp_wwn);
13651 	}
13652 	return (child);
13653 }
13654 
13655 static int
13656 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13657 {
13658 	int		rval = DDI_FAILURE;
13659 	char		*devname;
13660 	char		wwn_str[MPTSAS_WWN_STRLEN];
13661 	dev_info_t	*cdip;
13662 
13663 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13664 
13665 	cdip = mptsas_find_smp_child(pdip, wwn_str);
13666 
13667 	if (cdip == NULL)
13668 		return (DDI_SUCCESS);
13669 
13670 	/*
13671 	 * Make sure the node is attached, otherwise
13672 	 * it won't have related cache nodes to
13673 	 * clean up.  i_ddi_devi_attached() is
13674 	 * similar to i_ddi_node_state(cdip) >=
13675 	 * DS_ATTACHED.
13676 	 */
13677 	if (i_ddi_devi_attached(cdip)) {
13678 
13679 		/* Get full devname */
13680 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13681 		(void) ddi_deviname(cdip, devname);
13682 		/* Clean cache */
13683 		(void) devfs_clean(pdip, devname + 1,
13684 		    DV_CLEAN_FORCE);
13685 		kmem_free(devname, MAXNAMELEN + 1);
13686 	}
13687 
13688 	rval = ndi_devi_offline(cdip, flags);
13689 
13690 	return (rval);
13691 }
13692 
13693 static dev_info_t *
13694 mptsas_find_child(dev_info_t *pdip, char *name)
13695 {
13696 	dev_info_t	*child = NULL;
13697 	char		*rname = NULL;
13698 	int		rval = DDI_FAILURE;
13699 
13700 	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13701 
13702 	child = ddi_get_child(pdip);
13703 	while (child) {
13704 		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13705 		if (rval != DDI_SUCCESS) {
13706 			child = ddi_get_next_sibling(child);
13707 			bzero(rname, SCSI_MAXNAMELEN);
13708 			continue;
13709 		}
13710 
13711 		if (strcmp(rname, name) == 0) {
13712 			break;
13713 		}
13714 		child = ddi_get_next_sibling(child);
13715 		bzero(rname, SCSI_MAXNAMELEN);
13716 	}
13717 
13718 	kmem_free(rname, SCSI_MAXNAMELEN);
13719 
13720 	return (child);
13721 }
13722 
13723 
13724 static dev_info_t *
13725 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13726 {
13727 	dev_info_t	*child = NULL;
13728 	char		*name = NULL;
13729 	char		*addr = NULL;
13730 
13731 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13732 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13733 	(void) sprintf(name, "%016"PRIx64, sasaddr);
13734 	(void) sprintf(addr, "w%s,%x", name, lun);
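	/*
	 * e.g. sasaddr 0x5000c5000270e965 (made-up) and lun 0 yield the
	 * child unit address "w5000c5000270e965,0".
	 */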
13735 	child = mptsas_find_child(pdip, addr);
13736 	kmem_free(name, SCSI_MAXNAMELEN);
13737 	kmem_free(addr, SCSI_MAXNAMELEN);
13738 	return (child);
13739 }
13740 
13741 static dev_info_t *
13742 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13743 {
13744 	dev_info_t	*child;
13745 	char		*addr;
13746 
13747 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13748 	(void) sprintf(addr, "p%x,0", phy);
13749 	child = mptsas_find_child(pdip, addr);
13750 	kmem_free(addr, SCSI_MAXNAMELEN);
13751 	return (child);
13752 }
13753 
13754 static mdi_pathinfo_t *
13755 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13756 {
13757 	mdi_pathinfo_t	*path;
13758 	char		*addr = NULL;
13759 
13760 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13761 	(void) sprintf(addr, "p%x,0", phy);
13762 	path = mdi_pi_find(pdip, NULL, addr);
13763 	kmem_free(addr, SCSI_MAXNAMELEN);
13764 	return (path);
13765 }
13766 
13767 static mdi_pathinfo_t *
13768 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13769 {
13770 	mdi_pathinfo_t	*path;
13771 	char		*name = NULL;
13772 	char		*addr = NULL;
13773 
13774 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13775 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13776 	(void) sprintf(name, "%016"PRIx64, sasaddr);
13777 	(void) sprintf(addr, "w%s,%x", name, lun);
13778 	path = mdi_pi_find(parent, NULL, addr);
13779 	kmem_free(name, SCSI_MAXNAMELEN);
13780 	kmem_free(addr, SCSI_MAXNAMELEN);
13781 
13782 	return (path);
13783 }
13784 
13785 static int
13786 mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
13787     dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
13788 {
13789 	int			i = 0;
13790 	uchar_t			*inq83 = NULL;
13791 	int			inq83_len1 = 0xFF;
13792 	int			inq83_len = 0;
13793 	int			rval = DDI_FAILURE;
13794 	ddi_devid_t		devid;
13795 	char			*guid = NULL;
13796 	int			target = ptgt->m_devhdl;
13797 	mdi_pathinfo_t		*pip = NULL;
13798 	mptsas_t		*mpt = DIP2MPT(pdip);
13799 
13800 	/*
13801 	 * For DVD/CD-ROM, tape and optical
13802 	 * devices, we won't try to enumerate them under
13803 	 * scsi_vhci, so there is no need to try page 0x83.
13804 	 */
13805 	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
13806 	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
13807 	    sd_inq->inq_dtype == DTYPE_ESI))
13808 		goto create_lun;
13809 
13810 	/*
13811 	 * The LCA returns good SCSI status, but corrupt page 83 data the first
13812 	 * time it is queried.  The solution is to keep trying to request page
13813 	 * 0x83 and to verify that the GUID is well formed (not
13814 	 * DDI_NOT_WELL_FORMED) within mptsas_inq83_retry_timeout seconds.  If
13815 	 * the timeout expires, the driver gives up trying to get the VPD page
13816 	 * at this stage and fails the enumeration.
13816 	 */
13817 
13818 	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);
13819 
13820 	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
13821 		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13822 		    inq83_len1, &inq83_len, 1);
13823 		if (rval != 0) {
13824 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13825 			    "0x83 for target:%x, lun:%x failed!", target, lun);
13826 			goto out;
13827 		}
13828 		/*
13829 		 * create DEVID from inquiry data
13830 		 */
13831 		if ((rval = ddi_devid_scsi_encode(
13832 		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
13833 		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
13834 		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
13835 			/*
13836 			 * extract GUID from DEVID
13837 			 */
13838 			guid = ddi_devid_to_guid(devid);
13839 
13840 			/*
13841 			 * Do not enable MPXIO if the strlen(guid) is greater
13842 			 * than MPTSAS_MAX_GUID_LEN; this constraint will be
13843 			 * handled by the framework later.
13844 			 */
13845 			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
13846 				ddi_devid_free_guid(guid);
13847 				guid = NULL;
13848 				if (mpt->m_mpxio_enable == TRUE) {
13849 					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
13850 					    "lun:%x doesn't have a valid GUID, "
13851 					    "multipathing for this drive is "
13852 					    "not enabled", target, lun);
13853 				}
13854 			}
13855 
13856 			/*
13857 			 * devid no longer needed
13858 			 */
13859 			ddi_devid_free(devid);
13860 			break;
13861 		} else if (rval == DDI_NOT_WELL_FORMED) {
13862 			/*
13863 			 * A return value of ddi_devid_scsi_encode() equal to
13864 			 * DDI_NOT_WELL_FORMED means DEVID_RETRY; it is worth
13865 			 * retrying inquiry page 0x83 to get the GUID.
13866 			 */
13867 			NDBG20(("Not well formed devid, retry..."));
13868 			delay(1 * drv_usectohz(1000000));
13869 			continue;
13870 		} else {
13871 			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
13872 			    "path target:%x, lun:%x", target, lun);
13873 			rval = DDI_FAILURE;
13874 			goto create_lun;
13875 		}
13876 	}
13877 
13878 	if (i == mptsas_inq83_retry_timeout) {
13879 		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
13880 		    "for path target:%x, lun:%x", target, lun);
13881 	}
13882 
13883 	rval = DDI_FAILURE;
13884 
13885 create_lun:
13886 	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
13887 		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
13888 		    ptgt, lun);
13889 	}
13890 	if (rval != DDI_SUCCESS) {
13891 		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
13892 		    ptgt, lun);
13893 
13894 	}
13895 out:
13896 	if (guid != NULL) {
13897 		/*
13898 		 * guid no longer needed
13899 		 */
13900 		ddi_devid_free_guid(guid);
13901 	}
13902 	if (inq83 != NULL)
13903 		kmem_free(inq83, inq83_len1);
13904 	return (rval);
13905 }
13906 
13907 static int
13908 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13909     dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13910 {
13911 	int			target;
13912 	char			*nodename = NULL;
13913 	char			**compatible = NULL;
13914 	int			ncompatible	= 0;
13915 	int			mdi_rtn = MDI_FAILURE;
13916 	int			rval = DDI_FAILURE;
13917 	char			*old_guid = NULL;
13918 	mptsas_t		*mpt = DIP2MPT(pdip);
13919 	char			*lun_addr = NULL;
13920 	char			*wwn_str = NULL;
13921 	char			*attached_wwn_str = NULL;
13922 	char			*component = NULL;
13923 	uint8_t			phy = 0xFF;
13924 	uint64_t		sas_wwn;
13925 	int64_t			lun64 = 0;
13926 	uint32_t		devinfo;
13927 	uint16_t		dev_hdl;
13928 	uint16_t		pdev_hdl;
13929 	uint64_t		dev_sas_wwn;
13930 	uint64_t		pdev_sas_wwn;
13931 	uint32_t		pdev_info;
13932 	uint8_t			physport;
13933 	uint8_t			phy_id;
13934 	uint32_t		page_address;
13935 	uint16_t		bay_num, enclosure;
13936 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
13937 	uint32_t		dev_info;
13938 
13939 	mutex_enter(&mpt->m_mutex);
13940 	target = ptgt->m_devhdl;
13941 	sas_wwn = ptgt->m_sas_wwn;
13942 	devinfo = ptgt->m_deviceinfo;
13943 	phy = ptgt->m_phynum;
13944 	mutex_exit(&mpt->m_mutex);
13945 
13946 	if (sas_wwn) {
13947 		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13948 	} else {
13949 		*pip = mptsas_find_path_phy(pdip, phy);
13950 	}
13951 
13952 	if (*pip != NULL) {
13953 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13954 		ASSERT(*lun_dip != NULL);
13955 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13956 		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13957 		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13958 			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13959 				/*
13960 				 * Same path back online again.
13961 				 */
13962 				(void) ddi_prop_free(old_guid);
13963 				if ((!MDI_PI_IS_ONLINE(*pip)) &&
13964 				    (!MDI_PI_IS_STANDBY(*pip)) &&
13965 				    (ptgt->m_tgt_unconfigured == 0)) {
13966 					rval = mdi_pi_online(*pip, 0);
13967 					mutex_enter(&mpt->m_mutex);
13968 					(void) mptsas_set_led_status(mpt, ptgt,
13969 					    0);
13970 					mutex_exit(&mpt->m_mutex);
13971 				} else {
13972 					rval = DDI_SUCCESS;
13973 				}
13974 				if (rval != DDI_SUCCESS) {
13975 					mptsas_log(mpt, CE_WARN, "path:target: "
13976 					    "%x, lun:%x online failed!", target,
13977 					    lun);
13978 					*pip = NULL;
13979 					*lun_dip = NULL;
13980 				}
13981 				return (rval);
13982 			} else {
13983 				/*
13984 				 * The GUID of the LUN has changed, maybe
13985 				 * because the customer mapped another volume
13986 				 * to the same LUN.
13987 				 */
13988 				mptsas_log(mpt, CE_WARN, "The GUID of the "
13989 				    "target:%x, lun:%x was changed, maybe "
13990 				    "because someone mapped another volume "
13991 				    "to the same LUN", target, lun);
13992 				(void) ddi_prop_free(old_guid);
13993 				if (!MDI_PI_IS_OFFLINE(*pip)) {
13994 					rval = mdi_pi_offline(*pip, 0);
13995 					if (rval != MDI_SUCCESS) {
13996 						mptsas_log(mpt, CE_WARN, "path:"
13997 						    "target:%x, lun:%x offline "
13998 						    "failed!", target, lun);
13999 						*pip = NULL;
14000 						*lun_dip = NULL;
14001 						return (DDI_FAILURE);
14002 					}
14003 				}
14004 				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14005 					mptsas_log(mpt, CE_WARN, "path:target:"
14006 					    "%x, lun:%x free failed!", target,
14007 					    lun);
14008 					*pip = NULL;
14009 					*lun_dip = NULL;
14010 					return (DDI_FAILURE);
14011 				}
14012 			}
14013 		} else {
14014 			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14015 			    "property for path:target:%x, lun:%x", target, lun);
14016 			*pip = NULL;
14017 			*lun_dip = NULL;
14018 			return (DDI_FAILURE);
14019 		}
14020 	}
14021 	scsi_hba_nodename_compatible_get(inq, NULL,
14022 	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14023 
14024 	/*
14025 	 * if nodename can't be determined then print a message and skip it
14026 	 */
14027 	if (nodename == NULL) {
14028 		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14029 		    "driver for target%d lun %d dtype:0x%02x", target, lun,
14030 		    inq->inq_dtype);
14031 		return (DDI_FAILURE);
14032 	}
14033 
14034 	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14035 	/* The property is needed by MPAPI */
14036 	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14037 
14038 	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14039 	if (guid) {
14040 		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14041 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14042 	} else {
14043 		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
14044 		(void) sprintf(wwn_str, "p%x", phy);
14045 	}
14046 
14047 	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14048 	    guid, lun_addr, compatible, ncompatible,
14049 	    0, pip);
14050 	if (mdi_rtn == MDI_SUCCESS) {
14051 
14052 		if (mdi_prop_update_string(*pip, MDI_GUID,
14053 		    guid) != DDI_SUCCESS) {
14054 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14055 			    "create prop for target %d lun %d (MDI_GUID)",
14056 			    target, lun);
14057 			mdi_rtn = MDI_FAILURE;
14058 			goto virt_create_done;
14059 		}
14060 
14061 		if (mdi_prop_update_int(*pip, LUN_PROP,
14062 		    lun) != DDI_SUCCESS) {
14063 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14064 			    "create prop for target %d lun %d (LUN_PROP)",
14065 			    target, lun);
14066 			mdi_rtn = MDI_FAILURE;
14067 			goto virt_create_done;
14068 		}
14069 		lun64 = (int64_t)lun;
14070 		if (mdi_prop_update_int64(*pip, LUN64_PROP,
14071 		    lun64) != DDI_SUCCESS) {
14072 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14073 			    "create prop for target %d (LUN64_PROP)",
14074 			    target);
14075 			mdi_rtn = MDI_FAILURE;
14076 			goto virt_create_done;
14077 		}
14078 		if (mdi_prop_update_string_array(*pip, "compatible",
14079 		    compatible, ncompatible) !=
14080 		    DDI_PROP_SUCCESS) {
14081 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14082 			    "create prop for target %d lun %d (COMPATIBLE)",
14083 			    target, lun);
14084 			mdi_rtn = MDI_FAILURE;
14085 			goto virt_create_done;
14086 		}
14087 		if (sas_wwn && (mdi_prop_update_string(*pip,
14088 		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14089 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14090 			    "create prop for target %d lun %d "
14091 			    "(target-port)", target, lun);
14092 			mdi_rtn = MDI_FAILURE;
14093 			goto virt_create_done;
14094 		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14095 		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14096 			/*
14097 			 * Direct attached SATA device without DeviceName
14098 			 */
14099 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14100 			    "create prop for SAS target %d lun %d "
14101 			    "(sata-phy)", target, lun);
14102 			mdi_rtn = MDI_FAILURE;
14103 			goto virt_create_done;
14104 		}
14105 		mutex_enter(&mpt->m_mutex);
14106 
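		/*
		 * Fetch SAS device page 0 for this target and then for its
		 * parent device, so the attached-port property can be set.
		 */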
14107 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14108 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14109 		    (uint32_t)ptgt->m_devhdl;
14110 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14111 		    &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14112 		    &phy_id, &pdev_hdl, &bay_num, &enclosure);
14113 		if (rval != DDI_SUCCESS) {
14114 			mutex_exit(&mpt->m_mutex);
14115 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14116 			    "parent device for handle %d", page_address);
14117 			mdi_rtn = MDI_FAILURE;
14118 			goto virt_create_done;
14119 		}
14120 
14121 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14122 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14123 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14124 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14125 		    &phy_id, &pdev_hdl, &bay_num, &enclosure);
14126 		if (rval != DDI_SUCCESS) {
14127 			mutex_exit(&mpt->m_mutex);
14128 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14129 			    "device info for handle %d", page_address);
14130 			mdi_rtn = MDI_FAILURE;
14131 			goto virt_create_done;
14132 		}
14133 
14134 		mutex_exit(&mpt->m_mutex);
14135 
14136 		/*
14137 		 * If this device is directly attached to the controller,
14138 		 * set the attached-port to the base wwid
14139 		 */
14140 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14141 		    != DEVINFO_DIRECT_ATTACHED) {
14142 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14143 			    pdev_sas_wwn);
14144 		} else {
14145 			/*
14146 			 * Update the iport's attached-port property
14147 			 */
14148 			if (sas_wwn == 0) {
14149 				(void) sprintf(wwn_str, "p%x", phy);
14150 			} else {
14151 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14152 			}
14153 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
14154 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14155 			    DDI_PROP_SUCCESS) {
14156 				mptsas_log(mpt, CE_WARN,
14157 				    "mptsas unable to create "
14158 				    "property for iport target-port"
14159 				    " %s (sas_wwn)",
14160 				    wwn_str);
14161 				mdi_rtn = MDI_FAILURE;
14162 				goto virt_create_done;
14163 			}
14164 
14165 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14166 			    mpt->un.m_base_wwid);
14167 		}
14168 
14169 		if (mdi_prop_update_string(*pip,
14170 		    SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14171 		    DDI_PROP_SUCCESS) {
14172 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14173 			    "property for iport attached-port %s (sas_wwn)",
14174 		    pdev_wwn_str);
14175 			mdi_rtn = MDI_FAILURE;
14176 			goto virt_create_done;
14177 		}
14178 
14179 
14180 		if (inq->inq_dtype == 0) {
14181 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14182 			/*
14183 			 * set obp path for pathinfo
14184 			 */
14185 			(void) snprintf(component, MAXPATHLEN,
14186 			    "disk@%s", lun_addr);
14187 
14188 			if (mdi_pi_pathname_obp_set(*pip, component) !=
14189 			    DDI_SUCCESS) {
14190 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14191 				    "unable to set obp-path for object %s",
14192 				    component);
14193 				mdi_rtn = MDI_FAILURE;
14194 				goto virt_create_done;
14195 			}
14196 		}
14197 
14198 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14199 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14200 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14201 			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14202 			    "pm-capable", 1)) !=
14203 			    DDI_PROP_SUCCESS) {
14204 				mptsas_log(mpt, CE_WARN, "mptsas driver "
14205 				    "failed to create pm-capable "
14206 				    "property, target %d", target);
14207 				mdi_rtn = MDI_FAILURE;
14208 				goto virt_create_done;
14209 			}
14210 		}
14211 		/*
14212 		 * Create the phy-num property
14213 		 */
14214 		if (mdi_prop_update_int(*pip, "phy-num",
14215 		    ptgt->m_phynum) != DDI_SUCCESS) {
14216 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14217 			    "create phy-num property for target %d lun %d",
14218 			    target, lun);
14219 			mdi_rtn = MDI_FAILURE;
14220 			goto virt_create_done;
14221 		}
14222 		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14223 		mdi_rtn = mdi_pi_online(*pip, 0);
14224 		if (mdi_rtn == MDI_SUCCESS) {
14225 			mutex_enter(&mpt->m_mutex);
14226 			if (mptsas_set_led_status(mpt, ptgt, 0) !=
14227 			    DDI_SUCCESS) {
14228 				NDBG14(("mptsas: clear LED for slot %x "
14229 				    "failed", ptgt->m_slot_num));
14230 			}
14231 			mutex_exit(&mpt->m_mutex);
14232 		}
14233 		if (mdi_rtn == MDI_NOT_SUPPORTED) {
14234 			mdi_rtn = MDI_FAILURE;
14235 		}
14236 virt_create_done:
14237 		if (*pip && mdi_rtn != MDI_SUCCESS) {
14238 			(void) mdi_pi_free(*pip, 0);
14239 			*pip = NULL;
14240 			*lun_dip = NULL;
14241 		}
14242 	}
14243 
14244 	scsi_hba_nodename_compatible_free(nodename, compatible);
14245 	if (lun_addr != NULL) {
14246 		kmem_free(lun_addr, SCSI_MAXNAMELEN);
14247 	}
14248 	if (wwn_str != NULL) {
14249 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14250 	}
14251 	if (component != NULL) {
14252 		kmem_free(component, MAXPATHLEN);
14253 	}
14254 
14255 	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14256 }
14257 
14258 static int
14259 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14260     char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14261 {
14262 	int			target;
14263 	int			rval;
14264 	int			ndi_rtn = NDI_FAILURE;
14265 	uint64_t		be_sas_wwn;
14266 	char			*nodename = NULL;
14267 	char			**compatible = NULL;
14268 	int			ncompatible = 0;
14269 	int			instance = 0;
14270 	mptsas_t		*mpt = DIP2MPT(pdip);
14271 	char			*wwn_str = NULL;
14272 	char			*component = NULL;
14273 	char			*attached_wwn_str = NULL;
14274 	uint8_t			phy = 0xFF;
14275 	uint64_t		sas_wwn;
14276 	uint32_t		devinfo;
14277 	uint16_t		dev_hdl;
14278 	uint16_t		pdev_hdl;
14279 	uint64_t		pdev_sas_wwn;
14280 	uint64_t		dev_sas_wwn;
14281 	uint32_t		pdev_info;
14282 	uint8_t			physport;
14283 	uint8_t			phy_id;
14284 	uint32_t		page_address;
14285 	uint16_t		bay_num, enclosure;
14286 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
14287 	uint32_t		dev_info;
14288 	int64_t			lun64 = 0;
14289 
14290 	mutex_enter(&mpt->m_mutex);
14291 	target = ptgt->m_devhdl;
14292 	sas_wwn = ptgt->m_sas_wwn;
14293 	devinfo = ptgt->m_deviceinfo;
14294 	phy = ptgt->m_phynum;
14295 	mutex_exit(&mpt->m_mutex);
14296 
14297 	/*
14298 	 * generate compatible property with binding-set "mpt"
14299 	 */
14300 	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14301 	    &nodename, &compatible, &ncompatible);
14302 
14303 	/*
14304 	 * if nodename can't be determined then print a message and skip it
14305 	 */
14306 	if (nodename == NULL) {
14307 		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14308 		    "for target %d lun %d", target, lun);
14309 		return (DDI_FAILURE);
14310 	}
14311 
14312 	ndi_rtn = ndi_devi_alloc(pdip, nodename,
14313 	    DEVI_SID_NODEID, lun_dip);
14314 
14315 	/*
14316 	 * if the lun alloc succeeded, set props
14317 	 */
14318 	if (ndi_rtn == NDI_SUCCESS) {
14319 
14320 		if (ndi_prop_update_int(DDI_DEV_T_NONE,
14321 		    *lun_dip, LUN_PROP, lun) !=
14322 		    DDI_PROP_SUCCESS) {
14323 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14324 			    "property for target %d lun %d (LUN_PROP)",
14325 			    target, lun);
14326 			ndi_rtn = NDI_FAILURE;
14327 			goto phys_create_done;
14328 		}
14329 
14330 		lun64 = (int64_t)lun;
14331 		if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14332 		    *lun_dip, LUN64_PROP, lun64) !=
14333 		    DDI_PROP_SUCCESS) {
14334 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14335 			    "property for target %d lun64 %d (LUN64_PROP)",
14336 			    target, lun);
14337 			ndi_rtn = NDI_FAILURE;
14338 			goto phys_create_done;
14339 		}
14340 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14341 		    *lun_dip, "compatible", compatible, ncompatible)
14342 		    != DDI_PROP_SUCCESS) {
14343 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14344 			    "property for target %d lun %d (COMPATIBLE)",
14345 			    target, lun);
14346 			ndi_rtn = NDI_FAILURE;
14347 			goto phys_create_done;
14348 		}
14349 
14350 		/*
14351 		 * We need the SAS WWN for non-multipath devices, so
14352 		 * we'll use the same property that multipathing
14353 		 * devices need to present for MPAPI. If we don't have
14354 		 * a WWN (e.g. parallel SCSI), don't create the prop.
14355 		 */
14356 		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14357 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14358 		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14359 		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14360 		    != DDI_PROP_SUCCESS) {
14361 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14362 			    "create property for SAS target %d lun %d "
14363 			    "(target-port)", target, lun);
14364 			ndi_rtn = NDI_FAILURE;
14365 			goto phys_create_done;
14366 		}
14367 
14368 		be_sas_wwn = BE_64(sas_wwn);
14369 		if (sas_wwn && ndi_prop_update_byte_array(
14370 		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14371 		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14372 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14373 			    "create property for SAS target %d lun %d "
14374 			    "(port-wwn)", target, lun);
14375 			ndi_rtn = NDI_FAILURE;
14376 			goto phys_create_done;
14377 		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
14378 		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14379 		    DDI_PROP_SUCCESS)) {
14380 			/*
14381 			 * Direct attached SATA device without DeviceName
14382 			 */
14383 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14384 			    "create property for SAS target %d lun %d "
14385 			    "(sata-phy)", target, lun);
14386 			ndi_rtn = NDI_FAILURE;
14387 			goto phys_create_done;
14388 		}
14389 
14390 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14391 		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14392 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14393 			    "create property for SAS target %d lun %d"
14394 			    " (SAS_PROP)", target, lun);
14395 			ndi_rtn = NDI_FAILURE;
14396 			goto phys_create_done;
14397 		}
14398 		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14399 		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14400 			mptsas_log(mpt, CE_WARN, "mptsas unable "
14401 			    "to create guid property for target %d "
14402 			    "lun %d", target, lun);
14403 			ndi_rtn = NDI_FAILURE;
14404 			goto phys_create_done;
14405 		}
14406 
14407 		/*
14408 		 * The following code sets properties for SM-HBA support;
14409 		 * it doesn't apply to RAID volumes.
14410 		 */
14411 		if (ptgt->m_phymask == 0)
14412 			goto phys_raid_lun;
14413 
14414 		mutex_enter(&mpt->m_mutex);
14415 
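		/*
		 * Fetch SAS device page 0 for this target and then for its
		 * parent device, so the attached-port property can be set.
		 */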
14416 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14417 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14418 		    (uint32_t)ptgt->m_devhdl;
14419 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14420 		    &dev_hdl, &dev_sas_wwn, &dev_info,
14421 		    &physport, &phy_id, &pdev_hdl,
14422 		    &bay_num, &enclosure);
14423 		if (rval != DDI_SUCCESS) {
14424 			mutex_exit(&mpt->m_mutex);
14425 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14426 			    "parent device for handle %d.", page_address);
14427 			ndi_rtn = NDI_FAILURE;
14428 			goto phys_create_done;
14429 		}
14430 
14431 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14432 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14433 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14434 		    &dev_hdl, &pdev_sas_wwn, &pdev_info,
14435 		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14436 		if (rval != DDI_SUCCESS) {
14437 			mutex_exit(&mpt->m_mutex);
14438 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14439 			    "device info for handle %d.", page_address);
14440 			ndi_rtn = NDI_FAILURE;
14441 			goto phys_create_done;
14442 		}
14443 
14444 		mutex_exit(&mpt->m_mutex);
14445 
14446 		/*
14447 		 * If this device is directly attached to the controller,
14448 		 * set the attached-port to the base wwid
14449 		 */
14450 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14451 		    != DEVINFO_DIRECT_ATTACHED) {
14452 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14453 			    pdev_sas_wwn);
14454 		} else {
14455 			/*
14456 			 * Update the iport's attached-port property
14457 			 */
14458 			if (sas_wwn == 0) {
14459 				(void) sprintf(wwn_str, "p%x", phy);
14460 			} else {
14461 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14462 			}
14463 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
14464 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14465 			    DDI_PROP_SUCCESS) {
14466 				mptsas_log(mpt, CE_WARN,
14467 				    "mptsas unable to create "
14468 				    "property for iport target-port"
14469 				    " %s (sas_wwn)",
14470 				    wwn_str);
14471 				ndi_rtn = NDI_FAILURE;
14472 				goto phys_create_done;
14473 			}
14474 
14475 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14476 			    mpt->un.m_base_wwid);
14477 		}
14478 
14479 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
14480 		    *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14481 		    DDI_PROP_SUCCESS) {
14482 			mptsas_log(mpt, CE_WARN,
14483 			    "mptsas unable to create "
14484 			    "property for iport attached-port %s (sas_wwn)",
14485 			    pdev_wwn_str);
14486 			ndi_rtn = NDI_FAILURE;
14487 			goto phys_create_done;
14488 		}
14489 
14490 		if (IS_ATAPI_DEVICE(dev_info)) {
14491 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
14492 			    *lun_dip, MPTSAS_VARIANT, "atapi") !=
14493 			    DDI_PROP_SUCCESS) {
14494 				mptsas_log(mpt, CE_WARN,
14495 				    "mptsas unable to create "
14496 				    "property for device variant ");
14497 				ndi_rtn = NDI_FAILURE;
14498 				goto phys_create_done;
14499 			}
14500 		}
14501 
14502 		if (IS_SATA_DEVICE(dev_info)) {
14503 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
14504 			    *lun_dip, MPTSAS_VARIANT, "sata") !=
14505 			    DDI_PROP_SUCCESS) {
14506 				mptsas_log(mpt, CE_WARN,
14507 				    "mptsas unable to create "
14508 				    "property for device variant ");
14509 				ndi_rtn = NDI_FAILURE;
14510 				goto phys_create_done;
14511 			}
14512 		}
14513 phys_raid_lun:
14514 		/*
14515 		 * if this is a SAS controller and the target is a SATA
14516 		 * drive, set the 'pm-capable' property for sd; if on
14517 		 * an OPL platform, also check whether this is an ATAPI
14518 		 * device.
14519 		 */
14520 		instance = ddi_get_instance(mpt->m_dip);
14521 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14522 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14523 			NDBG2(("mptsas%d: creating pm-capable property, "
14524 			    "target %d", instance, target));
14525 
14526 			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14527 			    *lun_dip, "pm-capable", 1)) !=
14528 			    DDI_PROP_SUCCESS) {
14529 				mptsas_log(mpt, CE_WARN, "mptsas "
14530 				    "failed to create pm-capable "
14531 				    "property, target %d", target);
14532 				ndi_rtn = NDI_FAILURE;
14533 				goto phys_create_done;
14534 			}
14535 
14536 		}
14537 
14538 		if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14539 			/*
14540 			 * add 'obp-path' properties for devinfo
14541 			 */
14542 			bzero(wwn_str, MPTSAS_WWN_STRLEN);
14543 			(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14544 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14545 			if (guid) {
14546 				(void) snprintf(component, MAXPATHLEN,
14547 				    "disk@w%s,%x", wwn_str, lun);
14548 			} else {
14549 				(void) snprintf(component, MAXPATHLEN,
14550 				    "disk@p%x,%x", phy, lun);
14551 			}
14552 			if (ddi_pathname_obp_set(*lun_dip, component)
14553 			    != DDI_SUCCESS) {
14554 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14555 				    "unable to set obp-path for SAS "
14556 				    "object %s", component);
14557 				ndi_rtn = NDI_FAILURE;
14558 				goto phys_create_done;
14559 			}
14560 		}
14561 		/*
14562 		 * Create the phy-num property for non-raid disk
14563 		 */
14564 		if (ptgt->m_phymask != 0) {
14565 			if (ndi_prop_update_int(DDI_DEV_T_NONE,
14566 			    *lun_dip, "phy-num", ptgt->m_phynum) !=
14567 			    DDI_PROP_SUCCESS) {
14568 				mptsas_log(mpt, CE_WARN, "mptsas driver "
14569 				    "failed to create phy-num property for "
14570 				    "target %d", target);
14571 				ndi_rtn = NDI_FAILURE;
14572 				goto phys_create_done;
14573 			}
14574 		}
14575 phys_create_done:
14576 		/*
14577 		 * If the props were set up ok, online the lun
14578 		 */
14579 		if (ndi_rtn == NDI_SUCCESS) {
14580 			/*
14581 			 * Try to online the new node
14582 			 */
14583 			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14584 		}
14585 		if (ndi_rtn == NDI_SUCCESS) {
14586 			mutex_enter(&mpt->m_mutex);
14587 			if (mptsas_set_led_status(mpt, ptgt, 0) !=
14588 			    DDI_SUCCESS) {
14589 				NDBG14(("mptsas: clear LED for tgt %x "
14590 				    "failed", ptgt->m_slot_num));
14591 			}
14592 			mutex_exit(&mpt->m_mutex);
14593 		}
14594 
14595 		/*
14596 		 * If onlining failed, tear down the alloc'd lun
14597 		 */
14598 		if (ndi_rtn != NDI_SUCCESS) {
14599 			NDBG12(("mptsas driver unable to online "
14600 			    "target %d lun %d", target, lun));
14601 			ndi_prop_remove_all(*lun_dip);
14602 			(void) ndi_devi_free(*lun_dip);
14603 			*lun_dip = NULL;
14604 		}
14605 	}
14606 
14607 	scsi_hba_nodename_compatible_free(nodename, compatible);
14608 
14609 	if (wwn_str != NULL) {
14610 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14611 	}
14612 	if (component != NULL) {
14613 		kmem_free(component, MAXPATHLEN);
14614 	}
14615 
14616 
14617 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14618 }
14619 
14620 static int
14621 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14622 {
14623 	mptsas_t	*mpt = DIP2MPT(pdip);
14624 	struct smp_device smp_sd;
14625 
14626 	/* XXX An HBA driver should not be allocating an smp_device. */
14627 	bzero(&smp_sd, sizeof (struct smp_device));
14628 	smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14629 	bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14630 
14631 	if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14632 		return (NDI_FAILURE);
14633 	return (NDI_SUCCESS);
14634 }
14635 
14636 static int
14637 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14638 {
14639 	mptsas_t	*mpt = DIP2MPT(pdip);
14640 	mptsas_smp_t	*psmp = NULL;
14641 	int		rval;
14642 	int		phymask;
14643 
14644 	/*
14645 	 * Get the physical port associated with the iport
14646 	 * PHYMASK TODO
14647 	 */
14648 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14649 	    "phymask", 0);
14650 	/*
14651 	 * Find the smp node in the hash table with the specified sas
14652 	 * address and physical port
14653 	 */
14654 	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14655 	if (psmp == NULL) {
14656 		return (DDI_FAILURE);
14657 	}
14658 
14659 	rval = mptsas_online_smp(pdip, psmp, smp_dip);
14660 
14661 	return (rval);
14662 }
14663 
14664 static int
14665 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
14666     dev_info_t **smp_dip)
14667 {
14668 	char		wwn_str[MPTSAS_WWN_STRLEN];
14669 	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
14670 	int		ndi_rtn = NDI_FAILURE;
14671 	int		rval = 0;
14672 	mptsas_smp_t	dev_info;
14673 	uint32_t	page_address;
14674 	mptsas_t	*mpt = DIP2MPT(pdip);
14675 	uint16_t	dev_hdl;
14676 	uint64_t	sas_wwn;
14677 	uint64_t	smp_sas_wwn;
14678 	uint8_t		physport;
14679 	uint8_t		phy_id;
14680 	uint16_t	pdev_hdl;
14681 	uint8_t		numphys = 0;
14682 	uint16_t	i = 0;
14683 	char		phymask[MPTSAS_MAX_PHYS];
14684 	char		*iport = NULL;
14685 	mptsas_phymask_t	phy_mask = 0;
14686 	uint16_t	attached_devhdl;
14687 	uint16_t	bay_num, enclosure;
14688 
14689 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14690 
14691 	/*
14692 	 * Probe the smp device to prevent the node of a removed device
14693 	 * from being configured successfully
14694 	 */
14695 	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
14696 		return (DDI_FAILURE);
14697 	}
14698 
14699 	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
14700 		return (DDI_SUCCESS);
14701 	}
14702 
14703 	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
14704 
14705 	/*
14706 	 * if the smp node alloc succeeded, set props
14707 	 */
14708 	if (ndi_rtn == NDI_SUCCESS) {
14709 		/*
14710 		 * Set the flavor of the child to be SMP flavored
14711 		 */
14712 		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
14713 
14714 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
14715 		    *smp_dip, SMP_WWN, wwn_str) !=
14716 		    DDI_PROP_SUCCESS) {
14717 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14718 			    "property for smp device %s (sas_wwn)",
14719 			    wwn_str);
14720 			ndi_rtn = NDI_FAILURE;
14721 			goto smp_create_done;
14722 		}
14723 		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
14724 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
14725 		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
14726 		    DDI_PROP_SUCCESS) {
14727 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14728 			    "property for iport target-port %s (sas_wwn)",
14729 			    wwn_str);
14730 			ndi_rtn = NDI_FAILURE;
14731 			goto smp_create_done;
14732 		}
14733 
14734 		mutex_enter(&mpt->m_mutex);
14735 
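		/*
		 * Read expander page 0 for this SMP device to find its
		 * parent handle, then read device page 0 for both the
		 * parent and the expander itself.
		 */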
14736 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
14737 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
14738 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
14739 		    &dev_info);
14740 		if (rval != DDI_SUCCESS) {
14741 			mutex_exit(&mpt->m_mutex);
14742 			mptsas_log(mpt, CE_WARN,
14743 			    "mptsas unable to get expander "
14744 			    "parent device info for %x", page_address);
14745 			ndi_rtn = NDI_FAILURE;
14746 			goto smp_create_done;
14747 		}
14748 
14749 		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
14750 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14751 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14752 		    (uint32_t)dev_info.m_pdevhdl;
14753 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14754 		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
14755 		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14756 		if (rval != DDI_SUCCESS) {
14757 			mutex_exit(&mpt->m_mutex);
14758 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14759 			    "device info for %x", page_address);
14760 			ndi_rtn = NDI_FAILURE;
14761 			goto smp_create_done;
14762 		}
14763 
14764 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14765 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14766 		    (uint32_t)dev_info.m_devhdl;
14767 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14768 		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
14769 		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14770 		if (rval != DDI_SUCCESS) {
14771 			mutex_exit(&mpt->m_mutex);
14772 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14773 			    "device info for %x", page_address);
14774 			ndi_rtn = NDI_FAILURE;
14775 			goto smp_create_done;
14776 		}
14777 		mutex_exit(&mpt->m_mutex);
14778 
14779 		/*
14780 		 * If this smp is directly attached to the controller,
14781 		 * set the attached-port to the base wwid
14782 		 */
14783 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14784 		    != DEVINFO_DIRECT_ATTACHED) {
14785 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
14786 			    sas_wwn);
14787 		} else {
14788 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
14789 			    mpt->un.m_base_wwid);
14790 		}
14791 
14792 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
14793 		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
14794 		    DDI_PROP_SUCCESS) {
14795 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14796 			    "property for smp attached-port %s (sas_wwn)",
14797 			    attached_wwn_str);
14798 			ndi_rtn = NDI_FAILURE;
14799 			goto smp_create_done;
14800 		}
14801 
14802 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14803 		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
14804 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14805 			    "create property for SMP %s (SMP_PROP) ",
14806 			    wwn_str);
14807 			ndi_rtn = NDI_FAILURE;
14808 			goto smp_create_done;
14809 		}
14810 
14811 		/*
14812 		 * check the smp to see whether it is directly
14813 		 * attached to the controller
14814 		 */
14815 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14816 		    != DEVINFO_DIRECT_ATTACHED) {
14817 			goto smp_create_done;
14818 		}
14819 		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
14820 		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
14821 		if (numphys > 0) {
14822 			goto smp_create_done;
14823 		}
14824 		/*
14825 		 * this iport is an old iport, so we need to
14826 		 * reconfigure its props.
14827 		 */
14828 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14829 		    MPTSAS_VIRTUAL_PORT, 0) !=
14830 		    DDI_PROP_SUCCESS) {
14831 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14832 			    MPTSAS_VIRTUAL_PORT);
14833 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
14834 			    "prop update failed");
14835 			goto smp_create_done;
14836 		}
14837 
14838 		mutex_enter(&mpt->m_mutex);
14839 		numphys = 0;
14840 		iport = ddi_get_name_addr(pdip);
14841 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14842 			bzero(phymask, sizeof (phymask));
14843 			(void) sprintf(phymask,
14844 			    "%x", mpt->m_phy_info[i].phy_mask);
14845 			if (strcmp(phymask, iport) == 0) {
14846 				phy_mask = mpt->m_phy_info[i].phy_mask;
14847 				break;
14848 			}
14849 		}
14850 
14851 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14852 			if ((phy_mask >> i) & 0x01) {
14853 				numphys++;
14854 			}
14855 		}
14856 		/*
14857 		 * Update PHY info for smhba
14858 		 */
14859 		if (mptsas_smhba_phy_init(mpt)) {
14860 			mutex_exit(&mpt->m_mutex);
14861 			mptsas_log(mpt, CE_WARN, "mptsas phy update "
14862 			    "failed");
14863 			goto smp_create_done;
14864 		}
14865 		mutex_exit(&mpt->m_mutex);
14866 
14867 		mptsas_smhba_set_phy_props(mpt, iport, pdip,
14868 		    numphys, &attached_devhdl);
14869 
14870 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14871 		    MPTSAS_NUM_PHYS, numphys) !=
14872 		    DDI_PROP_SUCCESS) {
14873 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14874 			    MPTSAS_NUM_PHYS);
14875 			mptsas_log(mpt, CE_WARN, "mptsas update "
14876 			    "num phys props failed");
14877 			goto smp_create_done;
14878 		}
14879 		/*
14880 		 * Add parent's props for SMHBA support
14881 		 */
14882 		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
14883 		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14884 		    DDI_PROP_SUCCESS) {
14885 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14886 			    SCSI_ADDR_PROP_ATTACHED_PORT);
14887 			mptsas_log(mpt, CE_WARN, "mptsas update iport "
14888 			    "attached-port failed");
14889 			goto smp_create_done;
14890 		}
14891 
14892 smp_create_done:
14893 		/*
14894 		 * If the props were set up ok, online the smp node
14895 		 */
14896 		if (ndi_rtn == NDI_SUCCESS) {
14897 			/*
14898 			 * Try to online the new node
14899 			 */
14900 			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
14901 		}
14902 
14903 		/*
14904 		 * If onlining failed, tear down the alloc'd smp node
14905 		 */
14906 		if (ndi_rtn != NDI_SUCCESS) {
14907 			NDBG12(("mptsas unable to online "
14908 			    "SMP target %s", wwn_str));
14909 			ndi_prop_remove_all(*smp_dip);
14910 			(void) ndi_devi_free(*smp_dip);
14911 		}
14912 	}
14913 
14914 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14915 }
14916 
14917 /* smp transport routine */
14918 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
14919 {
14920 	uint64_t			wwn;
14921 	Mpi2SmpPassthroughRequest_t	req;
14922 	Mpi2SmpPassthroughReply_t	rep;
14923 	uint32_t			direction = 0;
14924 	mptsas_t			*mpt;
14925 	int				ret;
14926 	uint64_t			tmp64;
14927 
14928 	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
14929 	    smp_a_hba_tran->smp_tran_hba_private;
14930 
14931 	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
14932 	/*
14933 	 * Need to compose an SMP request message
14934 	 * and call mptsas_do_passthru() function
14935 	 */
14936 	bzero(&req, sizeof (req));
14937 	bzero(&rep, sizeof (rep));
14938 	req.PassthroughFlags = 0;
14939 	req.PhysicalPort = 0xff;
14940 	req.ChainOffset = 0;
14941 	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
14942 
14943 	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
14944 		smp_pkt->smp_pkt_reason = ERANGE;
14945 		return (DDI_FAILURE);
14946 	}
14947 	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
14948 
14949 	req.MsgFlags = 0;
14950 	tmp64 = LE_64(wwn);
14951 	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
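	/*
	 * Set the data direction flags for the passthrough based on the
	 * request and response sizes supplied in the smp packet.
	 */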
14952 	if (smp_pkt->smp_pkt_rspsize > 0) {
14953 		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
14954 	}
14955 	if (smp_pkt->smp_pkt_reqsize > 0) {
14956 		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
14957 	}
14958 
14959 	mutex_enter(&mpt->m_mutex);
14960 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
14961 	    (uint8_t *)smp_pkt->smp_pkt_rsp,
14962 	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
14963 	    smp_pkt->smp_pkt_rspsize - 4, direction,
14964 	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
14965 	    smp_pkt->smp_pkt_timeout, FKIOCTL);
14966 	mutex_exit(&mpt->m_mutex);
14967 	if (ret != 0) {
14968 		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
14969 		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
14970 		return (DDI_FAILURE);
14971 	}
14972 	/* the passthrough succeeded, so check the smp status */
14973 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
14974 		switch (LE_16(rep.IOCStatus)) {
14975 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
14976 			smp_pkt->smp_pkt_reason = ENODEV;
14977 			break;
14978 		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
14979 			smp_pkt->smp_pkt_reason = EOVERFLOW;
14980 			break;
14981 		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
14982 			smp_pkt->smp_pkt_reason = EIO;
14983 			break;
14984 		default:
14985 			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc "
14986 			    "status:%x", LE_16(rep.IOCStatus));
14987 			smp_pkt->smp_pkt_reason = EIO;
14988 			break;
14989 		}
14990 		return (DDI_FAILURE);
14991 	}
14992 	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
14993 		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
14994 		    rep.SASStatus);
14995 		smp_pkt->smp_pkt_reason = EIO;
14996 		return (DDI_FAILURE);
14997 	}
14998 
14999 	return (DDI_SUCCESS);
15000 }
15001 
15002 static void
15003 mptsas_idle_pm(void *arg)
15004 {
15005 	mptsas_t	*mpt = arg;
15006 
15007 	(void) pm_idle_component(mpt->m_dip, 0);
15008 	mutex_enter(&mpt->m_mutex);
15009 	mpt->m_pm_timeid = 0;
15010 	mutex_exit(&mpt->m_mutex);
15011 }
15012 
15013 /*
15014  * If we didn't get a match, we need to get sas page0 for each device
15015  * until we get a match. If that fails, return NULL
15016  */
15017 static mptsas_target_t *
15018 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
15019 {
15020 	int		i, j = 0;
15021 	int		rval = 0;
15022 	uint16_t	cur_handle;
15023 	uint32_t	page_address;
15024 	mptsas_target_t	*ptgt = NULL;
15025 
15026 	/*
15027 	 * A PHY-named device must be directly attached and attached to
15028 	 * a narrow port; otherwise the iport is not the parent of the
15029 	 * device we are looking for.
15030 	 */
15031 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15032 		if ((1 << i) & phymask)
15033 			j++;
15034 	}
15035 
15036 	if (j > 1)
15037 		return (NULL);
15038 
15039 	/*
15040 	 * Must be a narrow port with a single device attached to it, so
15041 	 * the device whose physical port number equals the iport's port
15042 	 * number is the one we are looking for.
15043 	 */
15044 
15045 	if (mpt->m_phy_info[phy].phy_mask != phymask)
15046 		return (NULL);
15047 
15048 	mutex_enter(&mpt->m_mutex);
15049 
15050 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
15051 	    MPTSAS_HASH_FIRST);
15052 	while (ptgt != NULL) {
15053 		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15054 			mutex_exit(&mpt->m_mutex);
15055 			return (ptgt);
15056 		}
15057 
15058 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
15059 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
15060 	}
15061 
15062 	if (mpt->m_done_traverse_dev) {
15063 		mutex_exit(&mpt->m_mutex);
15064 		return (NULL);
15065 	}
15066 
15067 	/* No match above; scan the remaining device handles */
15068 	cur_handle = mpt->m_dev_handle;
15069 	for (; ; ) {
15070 		ptgt = NULL;
15071 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15072 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15073 		rval = mptsas_get_target_device_info(mpt, page_address,
15074 		    &cur_handle, &ptgt);
15075 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
15076 		    (rval == DEV_INFO_FAIL_ALLOC)) {
15077 			break;
15078 		}
15079 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15080 		    (rval == DEV_INFO_PHYS_DISK)) {
15081 			continue;
15082 		}
15083 		mpt->m_dev_handle = cur_handle;
15084 
15085 		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15086 			break;
15087 		}
15088 	}
15089 
15090 	mutex_exit(&mpt->m_mutex);
15091 	return (ptgt);
15092 }
15093 
15094 /*
15095  * The ptgt->m_sas_wwn contains the wwid for each disk.
15096  * For RAID volumes, we need to check m_raidvol[x].m_raidwwid.
15097  * If we didn't get a match, we need to get sas page0 for each device
15098  * until we get a match.
15099  * If that fails, return NULL
15100  */
15101 static mptsas_target_t *
15102 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
15103 {
15104 	int		rval = 0;
15105 	uint16_t	cur_handle;
15106 	uint32_t	page_address;
15107 	mptsas_target_t	*tmp_tgt = NULL;
15108 
15109 	mutex_enter(&mpt->m_mutex);
15110 	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15111 	    &mpt->m_active->m_tgttbl, wwid, phymask);
15112 	if (tmp_tgt != NULL) {
15113 		mutex_exit(&mpt->m_mutex);
15114 		return (tmp_tgt);
15115 	}
15116 
15117 	if (phymask == 0) {
15118 		/*
15119 		 * It's an IR volume
15120 		 */
15121 		rval = mptsas_get_raid_info(mpt);
15122 		if (rval) {
15123 			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15124 			    &mpt->m_active->m_tgttbl, wwid, phymask);
15125 		}
15126 		mutex_exit(&mpt->m_mutex);
15127 		return (tmp_tgt);
15128 	}
15129 
15130 	if (mpt->m_done_traverse_dev) {
15131 		mutex_exit(&mpt->m_mutex);
15132 		return (NULL);
15133 	}
15134 
15135 	/* No match above; scan the remaining device handles */
15136 	cur_handle = mpt->m_dev_handle;
15137 	for (; ; ) {
15138 		tmp_tgt = NULL;
15139 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15140 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15141 		rval = mptsas_get_target_device_info(mpt, page_address,
15142 		    &cur_handle, &tmp_tgt);
15143 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
15144 		    (rval == DEV_INFO_FAIL_ALLOC)) {
15145 			tmp_tgt = NULL;
15146 			break;
15147 		}
15148 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15149 		    (rval == DEV_INFO_PHYS_DISK)) {
15150 			continue;
15151 		}
15152 		mpt->m_dev_handle = cur_handle;
15153 		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
15154 		    (tmp_tgt->m_phymask == phymask)) {
15155 			break;
15156 		}
15157 	}
15158 
15159 	mutex_exit(&mpt->m_mutex);
15160 	return (tmp_tgt);
15161 }
15162 
15163 static mptsas_smp_t *
15164 mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
15165 {
15166 	int		rval = 0;
15167 	uint16_t	cur_handle;
15168 	uint32_t	page_address;
15169 	mptsas_smp_t	smp_node, *psmp = NULL;
15170 
15171 	mutex_enter(&mpt->m_mutex);
15172 	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
15173 	    wwid, phymask);
15174 	if (psmp != NULL) {
15175 		mutex_exit(&mpt->m_mutex);
15176 		return (psmp);
15177 	}
15178 
15179 	if (mpt->m_done_traverse_smp) {
15180 		mutex_exit(&mpt->m_mutex);
15181 		return (NULL);
15182 	}
15183 
15184 	/* No match above; scan the remaining expander handles */
15185 	cur_handle = mpt->m_smp_devhdl;
15186 	for (; ; ) {
15187 		psmp = NULL;
15188 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
15189 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15190 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
15191 		    &smp_node);
15192 		if (rval != DDI_SUCCESS) {
15193 			break;
15194 		}
15195 		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
15196 		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
15197 		ASSERT(psmp);
15198 		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
15199 		    (psmp->m_phymask == phymask)) {
15200 			break;
15201 		}
15202 	}
15203 
15204 	mutex_exit(&mpt->m_mutex);
15205 	return (psmp);
15206 }
15207 
15208 /* helper functions using hash */
15209 
15210 /*
15211  * Can't have duplicate entries for the same devhdl;
15212  * if there are invalid entries, the devhdl should be set to 0xffff
15213  */
15214 static void *
15215 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15216 {
15217 	mptsas_hash_data_t *data;
15218 
15219 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15220 	while (data != NULL) {
15221 		if (data->devhdl == devhdl) {
15222 			break;
15223 		}
15224 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15225 	}
15226 	return (data);
15227 }
15228 
15229 mptsas_target_t *
15230 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15231     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15232 {
15233 	mptsas_target_t *tmp_tgt = NULL;
15234 
15235 	tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15236 	if (tmp_tgt != NULL) {
15237 		NDBG20(("Hash item already exists"));
15238 		tmp_tgt->m_deviceinfo = devinfo;
15239 		tmp_tgt->m_devhdl = devhdl;
15240 		return (tmp_tgt);
15241 	}
15242 	tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15243 	if (tmp_tgt == NULL) {
15244 		cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15245 		return (NULL);
15246 	}
15247 	tmp_tgt->m_devhdl = devhdl;
15248 	tmp_tgt->m_sas_wwn = wwid;
15249 	tmp_tgt->m_deviceinfo = devinfo;
15250 	tmp_tgt->m_phymask = phymask;
15251 	tmp_tgt->m_phynum = phynum;
15252 	/* Initialize the tgt structure */
15253 	tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15254 	tmp_tgt->m_qfull_retry_interval =
15255 	    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15256 	tmp_tgt->m_t_throttle = MAX_THROTTLE;
15257 
15258 	mptsas_hash_add(hashtab, tmp_tgt);
15259 
15260 	return (tmp_tgt);
15261 }
15262 
15263 static void
15264 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15265     mptsas_phymask_t phymask)
15266 {
15267 	mptsas_target_t *tmp_tgt;
15268 	tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15269 	if (tmp_tgt == NULL) {
15270 		cmn_err(CE_WARN, "Tgt not found, nothing to free");
15271 	} else {
15272 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15273 	}
15274 }
15275 
15276 /*
15277  * Return the entry in the hash table, adding or updating it as needed
15278  */
15279 static mptsas_smp_t *
15280 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15281 {
15282 	uint64_t key1 = data->m_sasaddr;
15283 	mptsas_phymask_t key2 = data->m_phymask;
15284 	mptsas_smp_t *ret_data;
15285 
15286 	ret_data = mptsas_hash_search(hashtab, key1, key2);
15287 	if (ret_data != NULL) {
15288 		bcopy(data, ret_data, sizeof (mptsas_smp_t));
15289 		return (ret_data);
15290 	}
15291 
15292 	ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15293 	bcopy(data, ret_data, sizeof (mptsas_smp_t));
15294 	mptsas_hash_add(hashtab, ret_data);
15295 	return (ret_data);
15296 }
15297 
15298 static void
15299 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15300     mptsas_phymask_t phymask)
15301 {
15302 	mptsas_smp_t *tmp_smp;
15303 	tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15304 	if (tmp_smp == NULL) {
15305 		cmn_err(CE_WARN, "Smp element not found, nothing to free");
15306 	} else {
15307 		kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15308 	}
15309 }
15310 
15311 /*
15312  * Hash operation functions
15313  * key1 is the sas_wwn, key2 is the phymask
15314  */
15315 static void
15316 mptsas_hash_init(mptsas_hash_table_t *hashtab)
15317 {
15318 	if (hashtab == NULL) {
15319 		return;
15320 	}
15321 	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15322 	    MPTSAS_HASH_ARRAY_SIZE);
15323 	hashtab->cur = NULL;
15324 	hashtab->line = 0;
15325 }
15326 
15327 static void
15328 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15329 {
15330 	uint16_t line = 0;
15331 	mptsas_hash_node_t *cur = NULL, *last = NULL;
15332 
15333 	if (hashtab == NULL) {
15334 		return;
15335 	}
15336 	for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15337 		cur = hashtab->head[line];
15338 		while (cur != NULL) {
15339 			last = cur;
15340 			cur = cur->next;
15341 			kmem_free(last->data, datalen);
15342 			kmem_free(last, sizeof (mptsas_hash_node_t));
15343 		}
15344 	}
15345 }
15346 
15347 /*
15348  * You must guarantee the element doesn't exist in the hash table
15349  * before you call mptsas_hash_add()
15350  */
15351 static void
15352 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15353 {
15354 	uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15355 	mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15356 	mptsas_hash_node_t **head = NULL;
15357 	mptsas_hash_node_t *node = NULL;
15358 
15359 	if (hashtab == NULL) {
15360 		return;
15361 	}
15362 	ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15363 	node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15364 	node->data = data;
15365 
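	/* Insert the node at the head of the bucket selected by key1 */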
15366 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15367 	if (*head == NULL) {
15368 		*head = node;
15369 	} else {
15370 		node->next = *head;
15371 		*head = node;
15372 	}
15373 }
15374 
15375 static void *
15376 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15377     mptsas_phymask_t key2)
15378 {
15379 	mptsas_hash_node_t **head = NULL;
15380 	mptsas_hash_node_t *last = NULL, *cur = NULL;
15381 	mptsas_hash_data_t *data;
15382 	if (hashtab == NULL) {
15383 		return (NULL);
15384 	}
15385 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15386 	cur = *head;
15387 	while (cur != NULL) {
15388 		data = cur->data;
15389 		if ((data->key1 == key1) && (data->key2 == key2)) {
15390 			if (last == NULL) {
15391 				(*head) = cur->next;
15392 			} else {
15393 				last->next = cur->next;
15394 			}
15395 			kmem_free(cur, sizeof (mptsas_hash_node_t));
15396 			return (data);
15397 		} else {
15398 			last = cur;
15399 			cur = cur->next;
15400 		}
15401 	}
15402 	return (NULL);
15403 }
15404 
15405 static void *
15406 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15407     mptsas_phymask_t key2)
15408 {
15409 	mptsas_hash_node_t *cur = NULL;
15410 	mptsas_hash_data_t *data;
15411 	if (hashtab == NULL) {
15412 		return (NULL);
15413 	}
15414 	cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15415 	while (cur != NULL) {
15416 		data = cur->data;
15417 		if ((data->key1 == key1) && (data->key2 == key2)) {
15418 			return (data);
15419 		} else {
15420 			cur = cur->next;
15421 		}
15422 	}
15423 	return (NULL);
15424 }
15425 
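/*
 * Walk the hash table: MPTSAS_HASH_FIRST restarts the traversal and
 * returns the first entry, MPTSAS_HASH_NEXT returns the entry after
 * the one returned last time. Returns NULL when the walk is complete.
 */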
15426 static void *
15427 mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
15428 {
15429 	mptsas_hash_node_t *this = NULL;
15430 
15431 	if (hashtab == NULL) {
15432 		return (NULL);
15433 	}
15434 
15435 	if (pos == MPTSAS_HASH_FIRST) {
15436 		hashtab->line = 0;
15437 		hashtab->cur = NULL;
15438 		this = hashtab->head[0];
15439 	} else {
15440 		if (hashtab->cur == NULL) {
15441 			return (NULL);
15442 		} else {
15443 			this = hashtab->cur->next;
15444 		}
15445 	}
15446 
15447 	while (this == NULL) {
15448 		hashtab->line++;
15449 		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
15450 			/* the traversal has reached the end */
15451 			hashtab->cur = NULL;
15452 			return (NULL);
15453 		} else {
15454 			this = hashtab->head[hashtab->line];
15455 		}
15456 	}
15457 	hashtab->cur = this;
15458 	return (this->data);
15459 }
15460 
15461 /*
15462  * Functions for SGPIO LED support
15463  */
15464 static dev_info_t *
15465 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15466 {
15467 	dev_info_t	*dip;
15468 	int		prop;
15469 	dip = e_ddi_hold_devi_by_dev(dev, 0);
15470 	if (dip == NULL)
15471 		return (dip);
15472 	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15473 	    "phymask", 0);
15474 	*phymask = (mptsas_phymask_t)prop;
15475 	ddi_release_devi(dip);
15476 	return (dip);
15477 }
15478 static mptsas_target_t *
15479 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15480 {
15481 	uint8_t			phynum;
15482 	uint64_t		wwn;
15483 	int			lun;
15484 	mptsas_target_t		*ptgt = NULL;
15485 
15486 	if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15487 		return (NULL);
15488 	}
15489 	if (addr[0] == 'w') {
15490 		ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15491 	} else {
15492 		ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15493 	}
15494 	return (ptgt);
15495 }
15496 
15497 #ifdef MPTSAS_GET_LED
15498 static int
15499 mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
15500     uint32_t *slotstatus)
15501 {
15502 	return (mptsas_send_sep(mpt, ptgt, slotstatus,
15503 	    MPI2_SEP_REQ_ACTION_READ_STATUS));
15504 }
15505 #endif
15506 static int
15507 mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
15508 {
15509 	NDBG14(("mptsas_set_led_status: set LED status %x for slot %x",
15510 	    slotstatus, ptgt->m_slot_num));
15511 	return (mptsas_send_sep(mpt, ptgt, &slotstatus,
15512 	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
15513 }
15514 /*
15515  *  send sep request, use enclosure/slot addressing
15516  */
15517 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
15518     uint32_t *status, uint8_t act)
15519 {
15520 	Mpi2SepRequest_t	req;
15521 	Mpi2SepReply_t		rep;
15522 	int			ret;
15523 
15524 	ASSERT(mutex_owned(&mpt->m_mutex));
15525 
15526 	bzero(&req, sizeof (req));
15527 	bzero(&rep, sizeof (rep));
15528 
15529 	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
15530 	req.Action = act;
15531 	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
15532 	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
15533 	req.Slot = LE_16(ptgt->m_slot_num);
15534 	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15535 		req.SlotStatus = LE_32(*status);
15536 	}
15537 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
15538 	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
15539 	if (ret != 0) {
15540 		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
15541 		    "Processor Request message error %d", ret);
15542 		return (DDI_FAILURE);
15543 	}
15544 	/* do passthrough success, check the ioc status */
15545 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15546 		if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
15547 		    MPI2_IOCSTATUS_INVALID_FIELD) {
15548 			mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
15549 			    "supported action, loginfo %x", act,
15550 			    LE_32(rep.IOCLogInfo));
15551 			return (DDI_FAILURE);
15552 		}
15553 		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
15554 		    "status:%x", act, LE_16(rep.IOCStatus));
15555 		return (DDI_FAILURE);
15556 	}
15557 	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15558 		*status = LE_32(rep.SlotStatus);
15559 	}
15560 
15561 	return (DDI_SUCCESS);
15562 }
15563