1 /*
2  * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3  * controllers, i.e. Thunderbolt and Invader.
4  *
5  * Solaris MegaRAID device driver for SAS2.0 controllers
6  * Copyright (c) 2008-2012, LSI Logic Corporation.
7  * All rights reserved.
8  *
9  * Version:
10  * Author:
11  *		Swaminathan K S
12  *		Arun Chandrashekhar
13  *		Manju R
14  *		Rasheed
15  *		Shakeel Bukhari
16  */
17 
18 /*
19  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
20  */
21 
22 
23 #include <sys/types.h>
24 #include <sys/file.h>
25 #include <sys/atomic.h>
26 #include <sys/scsi/scsi.h>
27 #include <sys/byteorder.h>
28 #include "ld_pd_map.h"
29 #include "mr_sas.h"
30 #include "fusion.h"
31 
32 /*
33  * FMA header files
34  */
35 #include <sys/ddifm.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/fm/util.h>
38 #include <sys/fm/io/ddi.h>
39 
40 
41 /* Pre-TB command size and TB command size. */
42 #define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
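/*
 * A reading of where 1280 comes from (an assumption, not stated in the
 * original): one MRSAS_THUNDERBOLT_MSG_SIZE message frame plus the
 * 1024-byte chained-SGL region that create_mpi2_frame_pool() carves out
 * per command.
 */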
43 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
44 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
45 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
46 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
47 extern ddi_dma_attr_t mrsas_generic_dma_attr;
48 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
49 extern struct ddi_device_acc_attr endian_attr;
50 extern int	debug_level_g;
51 extern unsigned int	enable_fp;
52 volatile int dump_io_wait_time = 90;
53 extern volatile int  debug_timeout_g;
54 extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
55 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
56 extern void	push_pending_mfi_pkt(struct mrsas_instance *,
57 			struct mrsas_cmd *);
58 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
59 	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
60 
61 /* Local static prototypes. */
62 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
63     struct scsi_address *, struct scsi_pkt *, uchar_t *);
64 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
65     U64 start_blk, U32 num_blocks);
66 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
67 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
68 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
69 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
70 #ifdef PDSUPPORT
71 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
72     struct mrsas_tbolt_pd_info *, int);
73 #endif /* PDSUPPORT */
74 
75 static int debug_tbolt_fw_faults_after_ocr_g = 0;
76 
77 /*
78  * destroy_mfi_mpi_frame_pool
79  */
80 void
81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
82 {
83 	int	i;
84 
85 	struct mrsas_cmd	*cmd;
86 
87 	/* return all mfi frames to pool */
88 	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
89 		cmd = instance->cmd_list[i];
90 		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
91 			(void) mrsas_free_dma_obj(instance,
92 			    cmd->frame_dma_obj);
93 		}
94 		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
95 	}
96 }
97 
98 /*
99  * destroy_mpi2_frame_pool
100  */
101 void
102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
103 {
104 
105 	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
106 		(void) mrsas_free_dma_obj(instance,
107 		    instance->mpi2_frame_pool_dma_obj);
108 		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
109 	}
110 }
111 
112 
113 /*
114  * mrsas_tbolt_free_additional_dma_buffer
115  */
116 void
117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
118 {
119 	int i;
120 
121 	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
122 		(void) mrsas_free_dma_obj(instance,
123 		    instance->mfi_internal_dma_obj);
124 		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
125 	}
126 	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
127 		(void) mrsas_free_dma_obj(instance,
128 		    instance->mfi_evt_detail_obj);
129 		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
130 	}
131 
132 	for (i = 0; i < 2; i++) {
133 		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
134 			(void) mrsas_free_dma_obj(instance,
135 			    instance->ld_map_obj[i]);
136 			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
137 		}
138 	}
139 }
140 
141 
142 /*
143  * free_req_rep_desc_pool
144  */
145 void
146 free_req_rep_desc_pool(struct mrsas_instance *instance)
147 {
148 	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
149 		(void) mrsas_free_dma_obj(instance,
150 		    instance->request_desc_dma_obj);
151 		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
152 	}
153 
154 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
155 		(void) mrsas_free_dma_obj(instance,
156 		    instance->reply_desc_dma_obj);
157 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
158 	}
159 
160 
161 }
162 
163 
164 /*
165  * ThunderBolt(TB) Request Message Frame Pool
166  */
167 int
168 create_mpi2_frame_pool(struct mrsas_instance *instance)
169 {
170 	int		i = 0;
171 	uint16_t	max_cmd;
172 	uint32_t	sgl_sz;
173 	uint32_t	raid_msg_size;
174 	uint32_t	total_size;
175 	uint32_t	offset;
176 	uint32_t	io_req_base_phys;
177 	uint8_t		*io_req_base;
178 	struct mrsas_cmd	*cmd;
179 
180 	max_cmd = instance->max_fw_cmds;
181 
182 	sgl_sz		= 1024;
183 	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;
184 
185 	/* Allocate an additional 256 bytes to accommodate SMID 0. */
186 	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
187 	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
188 
189 	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
190 	    "max_cmd %x", max_cmd));
191 
192 	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
193 	    "request message frame pool size %x", total_size));
194 
195 	/*
196 	 * ThunderBolt(TB): create a single chunk of DMA-able memory and
197 	 * split it into 1024 commands. Each command should be able to
198 	 * contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
199 	 * within it. See the alloc_req_rep_desc() function, where the
200 	 * request/reply descriptor queues are allocated, for comparison.
201 	 */
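	/*
	 * Illustrative layout of that single chunk, derived from the offset
	 * arithmetic below (a sketch, not authoritative):
	 *
	 *   +----------------------+ io_request_frames (256-byte aligned)
	 *   | frame for SMID 0     | MRSAS_THUNDERBOLT_MSG_SIZE (IOC INIT)
	 *   +----------------------+ io_req_base
	 *   | max_cmd msg frames   | max_cmd * raid_msg_size
	 *   +----------------------+
	 *   | max_cmd chained SGLs | max_cmd * sgl_sz
	 *   +----------------------+
	 *   | max_cmd sense bufs   | max_cmd * SENSE_LENGTH
	 *   +----------------------+
	 */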
202 
203 	instance->mpi2_frame_pool_dma_obj.size = total_size;
204 	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
205 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
206 	    0xFFFFFFFFU;
207 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
208 	    0xFFFFFFFFU;
209 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
210 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
211 
212 	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
213 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
214 		cmn_err(CE_WARN,
215 		    "mr_sas: could not alloc mpi2 frame pool");
216 		return (DDI_FAILURE);
217 	}
218 
219 	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
220 	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
221 
222 	instance->io_request_frames =
223 	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
224 	instance->io_request_frames_phy =
225 	    (uint32_t)
226 	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
227 
228 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
229 	    (void *)instance->io_request_frames));
230 
231 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
232 	    instance->io_request_frames_phy));
233 
234 	io_req_base = (uint8_t *)instance->io_request_frames +
235 	    MRSAS_THUNDERBOLT_MSG_SIZE;
236 	io_req_base_phys = instance->io_request_frames_phy +
237 	    MRSAS_THUNDERBOLT_MSG_SIZE;
238 
239 	con_log(CL_DLEVEL3, (CE_NOTE,
240 	    "io req_base_phys 0x%x", io_req_base_phys));
241 
242 	for (i = 0; i < max_cmd; i++) {
243 		cmd = instance->cmd_list[i];
244 
245 		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
246 
247 		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
248 		    ((uint8_t *)io_req_base + offset);
249 		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
250 
251 		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
252 		    (max_cmd * raid_msg_size) + i * sgl_sz);
253 
254 		cmd->sgl_phys_addr = (io_req_base_phys +
255 		    (max_cmd * raid_msg_size) + i * sgl_sz);
256 
257 		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
258 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
259 		    (i * SENSE_LENGTH));
260 
261 		cmd->sense_phys_addr1 = (io_req_base_phys +
262 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
263 		    (i * SENSE_LENGTH));
264 
265 
266 		cmd->SMID = i + 1;
267 
268 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
269 		    cmd->index, (void *)cmd->scsi_io_request));
270 
271 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
272 		    cmd->index, cmd->scsi_io_request_phys_addr));
273 
274 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
275 		    cmd->index, (void *)cmd->sense1));
276 
277 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
278 		    cmd->index, cmd->sense_phys_addr1));
279 
280 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
281 		    cmd->index, (void *)cmd->sgl));
282 
283 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
284 		    cmd->index, cmd->sgl_phys_addr));
285 	}
286 
287 	return (DDI_SUCCESS);
288 
289 }
290 
291 
292 /*
293  * mrsas_tbolt_alloc_additional_dma_buffer - AEN, event and LD map buffers
294  */
295 int
296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
297 {
298 	uint32_t	internal_buf_size = PAGESIZE*2;
299 	int i;
300 
301 	/* Initialize buffer status as free */
302 	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
303 	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
304 	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
305 	instance->ld_map_obj[1].status = DMA_OBJ_FREED;
306 
307 
308 	instance->mfi_internal_dma_obj.size = internal_buf_size;
309 	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
310 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
311 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
312 	    0xFFFFFFFFU;
313 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
314 
315 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
316 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
317 		cmn_err(CE_WARN,
318 		    "mr_sas: could not alloc reply queue");
319 		return (DDI_FAILURE);
320 	}
321 
322 	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
323 
324 	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
325 	instance->internal_buf =
326 	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
327 	instance->internal_buf_dmac_add =
328 	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
329 	instance->internal_buf_size = internal_buf_size;
330 
331 	/* allocate evt_detail */
332 	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
333 	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
334 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
335 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
336 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
337 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
338 
339 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
340 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
341 		cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
342 		    "could not allocate data transfer buffer.");
343 		goto fail_tbolt_additional_buff;
344 	}
345 
346 	bzero(instance->mfi_evt_detail_obj.buffer,
347 	    sizeof (struct mrsas_evt_detail));
348 
349 	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
350 
351 	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
352 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
353 
354 	for (i = 0; i < 2; i++) {
355 		/* allocate the data transfer buffer */
356 		instance->ld_map_obj[i].size = instance->size_map_info;
357 		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
358 		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
359 		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
360 		    0xFFFFFFFFU;
361 		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
362 		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
363 
364 		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
365 		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
366 			cmn_err(CE_WARN,
367 			    "could not allocate data transfer buffer.");
368 			goto fail_tbolt_additional_buff;
369 		}
370 
371 		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
372 
373 		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
374 
375 		instance->ld_map[i] =
376 		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
377 		instance->ld_map_phy[i] = (uint32_t)instance->
378 		    ld_map_obj[i].dma_cookie[0].dmac_address;
379 
380 		con_log(CL_DLEVEL3, (CE_NOTE,
381 		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
382 
383 		con_log(CL_DLEVEL3, (CE_NOTE,
384 		    "size_map_info 0x%x", instance->size_map_info));
385 	}
386 
387 	return (DDI_SUCCESS);
388 
389 fail_tbolt_additional_buff:
390 	mrsas_tbolt_free_additional_dma_buffer(instance);
391 
392 	return (DDI_FAILURE);
393 }
394 
395 MRSAS_REQUEST_DESCRIPTOR_UNION *
396 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
397 {
398 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
399 
400 	if (index >= instance->max_fw_cmds) {
401 		con_log(CL_ANN1, (CE_NOTE,
402 		    "Invalid SMID 0x%x request for descriptor", index));
403 		con_log(CL_ANN1, (CE_NOTE,
404 		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
405 		return (NULL);
406 	}
407 
408 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
409 	    ((char *)instance->request_message_pool +
410 	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
411 
412 	con_log(CL_ANN1, (CE_NOTE,
413 	    "request descriptor : 0x%08lx", (unsigned long)req_desc));
414 
415 	con_log(CL_ANN1, (CE_NOTE,
416 	    "request descriptor base phy : 0x%08lx",
417 	    (unsigned long)instance->request_message_pool_phy));
418 
419 	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
420 }
421 
422 
423 /*
424  * Allocate Request and Reply Queue Descriptors.
425  */
426 int
427 alloc_req_rep_desc(struct mrsas_instance *instance)
428 {
429 	uint32_t	request_q_sz, reply_q_sz;
430 	int		i, max_reply_q_sz;
431 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
432 
433 	/*
434 	 * ThunderBolt(TB): there is no longer a producer/consumer mechanism.
435 	 * Once we get an interrupt we are supposed to scan through the list
436 	 * of reply descriptors and process them accordingly. We need to
437 	 * allocate memory for up to 1024 reply descriptors.
438 	 */
439 
440 	/* Allocate Reply Descriptors */
441 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
442 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
443 
444 	/* reply queue size should be multiple of 16 */
445 	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
446 
447 	reply_q_sz = 8 * max_reply_q_sz;
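	/*
	 * Worked example of the rounding above: with max_fw_cmds = 1007,
	 * ((1007 + 1 + 15) / 16) * 16 = 1008 entries, i.e. 8064 bytes of
	 * 8-byte reply descriptors.
	 */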
448 
449 
450 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
451 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
452 
453 	instance->reply_desc_dma_obj.size = reply_q_sz;
454 	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
455 	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
456 	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
457 	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
458 	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
459 
460 	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
461 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
462 		cmn_err(CE_WARN,
463 		    "mr_sas: could not alloc reply queue");
464 		return (DDI_FAILURE);
465 	}
466 
467 	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
468 	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
469 
470 	/* virtual address of  reply queue */
471 	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
472 	    instance->reply_desc_dma_obj.buffer);
473 
474 	instance->reply_q_depth = max_reply_q_sz;
475 
476 	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
477 	    instance->reply_q_depth));
478 
479 	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
480 	    (void *)instance->reply_frame_pool));
481 
482 	/* Initialize reply descriptor Words to all 1s (empty marker). */
483 	reply_desc = instance->reply_frame_pool;
484 
485 	for (i = 0; i < instance->reply_q_depth; i++) {
486 		reply_desc->Words = (uint64_t)~0;
487 		reply_desc++;
488 	}
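	/*
	 * Why all 1s: firmware posts completed replies into this ring, so an
	 * entry whose Words are still all 1s marks a slot not yet written;
	 * the completion path relies on that to find the end of new replies
	 * (a reading of the reply-ring scheme, not stated here).
	 */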
489 
490 
491 	instance->reply_frame_pool_phy =
492 	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
493 
494 	con_log(CL_ANN1, (CE_NOTE,
495 	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
496 
497 
498 	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
499 	    reply_q_sz);
500 
501 	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
502 	    instance->reply_pool_limit_phy));
503 
504 
505 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
506 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 
508 	/* Allocate Request Descriptors */
509 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
510 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
511 
512 	request_q_sz = 8 *
513 	    (instance->max_fw_cmds);
514 
515 	instance->request_desc_dma_obj.size = request_q_sz;
516 	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
517 	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
518 	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
519 	    0xFFFFFFFFU;
520 	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
521 	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
522 
523 	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
524 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
525 		cmn_err(CE_WARN,
526 		    "mr_sas: could not alloc request queue desc");
527 		goto fail_undo_reply_queue;
528 	}
529 
530 	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
531 	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
532 
533 	/* virtual address of  request queue desc */
534 	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
535 	    (instance->request_desc_dma_obj.buffer);
536 
537 	instance->request_message_pool_phy =
538 	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
539 
540 	return (DDI_SUCCESS);
541 
542 fail_undo_reply_queue:
543 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
544 		(void) mrsas_free_dma_obj(instance,
545 		    instance->reply_desc_dma_obj);
546 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
547 	}
548 
549 	return (DDI_FAILURE);
550 }
551 
552 /*
553  * mrsas_alloc_cmd_pool_tbolt
554  *
555  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() so there is
556  * a single routine.
557  */
558 int
559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
560 {
561 	int		i;
562 	int		count;
563 	uint32_t	max_cmd;
564 	uint32_t	reserve_cmd;
565 	size_t		sz;
566 
567 	struct mrsas_cmd	*cmd;
568 
569 	max_cmd = instance->max_fw_cmds;
570 	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
571 	    "max_cmd %x", max_cmd));
572 
573 
574 	sz = sizeof (struct mrsas_cmd *) * max_cmd;
575 
576 	/*
577 	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
578 	 * Allocate the dynamic array first and then allocate individual
579 	 * commands.
580 	 */
581 	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
582 
583 	/* create a frame pool and assign one frame to each cmd */
584 	for (count = 0; count < max_cmd; count++) {
585 		instance->cmd_list[count] =
586 		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
587 	}
588 
589 	/* add all the commands to command pool */
590 
591 	INIT_LIST_HEAD(&instance->cmd_pool_list);
592 	INIT_LIST_HEAD(&instance->cmd_pend_list);
593 	INIT_LIST_HEAD(&instance->cmd_app_pool_list);
594 
595 	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
596 
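	/*
	 * Pool split, as implemented below: cmd_list[0] is held back for the
	 * IOC INIT frame, indices 1 .. MRSAS_APP_RESERVED_CMDS-1 feed the
	 * application (MFI pass-through) pool, and the rest feed the regular
	 * I/O pool.
	 */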
597 	/* cmd index 0 is reserved for IOC INIT */
598 	for (i = 1; i < reserve_cmd; i++) {
599 		cmd		= instance->cmd_list[i];
600 		cmd->index	= i;
601 		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
602 	}
603 
604 
605 	for (i = reserve_cmd; i < max_cmd; i++) {
606 		cmd		= instance->cmd_list[i];
607 		cmd->index	= i;
608 		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
609 	}
610 
611 	return (DDI_SUCCESS);
612 
613 mrsas_undo_cmds:
614 	if (count > 0) {
615 		/* free each cmd */
616 		for (i = 0; i < count; i++) {
617 			if (instance->cmd_list[i] != NULL) {
618 				kmem_free(instance->cmd_list[i],
619 				    sizeof (struct mrsas_cmd));
620 			}
621 			instance->cmd_list[i] = NULL;
622 		}
623 	}
624 
625 mrsas_undo_cmd_list:
626 	if (instance->cmd_list != NULL)
627 		kmem_free(instance->cmd_list, sz);
628 	instance->cmd_list = NULL;
629 
630 	return (DDI_FAILURE);
631 }
632 
633 
634 /*
635  * free_space_for_mpi2
636  */
637 void
638 free_space_for_mpi2(struct mrsas_instance *instance)
639 {
640 	/* already freed */
641 	if (instance->cmd_list == NULL) {
642 		return;
643 	}
644 
645 	/* First free the additional DMA buffer */
646 	mrsas_tbolt_free_additional_dma_buffer(instance);
647 
648 	/* Free the request/reply descriptor pool */
649 	free_req_rep_desc_pool(instance);
650 
651 	/*  Free the MPI message pool */
652 	destroy_mpi2_frame_pool(instance);
653 
654 	/* Free the MFI frame pool */
655 	destroy_mfi_frame_pool(instance);
656 
657 	/* Free all the commands in the cmd_list */
658 	/* Free the cmd_list buffer itself */
659 	mrsas_free_cmd_pool(instance);
660 }
661 
662 
663 /*
664  * ThunderBolt(TB) memory allocations for commands/messages/frames.
665  */
666 int
667 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 {
669 	/* Allocate command pool (memory for cmd_list & individual commands) */
670 	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 		cmn_err(CE_WARN, "Error creating cmd pool");
672 		return (DDI_FAILURE);
673 	}
674 
675 	/* Initialize single reply size and Message size */
676 	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 
679 	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 
685 	/* Reduce SG count by 2 to take care of group cmds feature in FW */
686 	instance->max_num_sge = (instance->max_sge_in_main_msg +
687 	    instance->max_sge_in_chain - 2);
688 	instance->chain_offset_mpt_msg =
689 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 	    sizeof (MPI2_SGE_IO_UNION)) / 16;
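	/*
	 * Worked example, assuming MRSAS_THUNDERBOLT_MSG_SIZE is 256 and
	 * sizeof (MPI2_SGE_IO_UNION) is 16 (assumed values, not quoted from
	 * the headers): max_sge_in_chain = (1280 - 256) / 16 = 64 and
	 * chain_offset_io_req = (256 - 16) / 16 = 15 sixteen-byte units.
	 */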
692 	instance->reply_read_index = 0;
693 
694 
695 	/* Allocate Request and Reply descriptors Array */
696 	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
697 	if (alloc_req_rep_desc(instance)) {
698 		cmn_err(CE_WARN,
699 		    "Error, allocating memory for descripter-pool");
700 		goto mpi2_undo_cmd_pool;
701 	}
702 	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 	    instance->request_message_pool_phy));
704 
705 
706 	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 	if (create_mfi_frame_pool(instance)) {
708 		cmn_err(CE_WARN,
709 		    "Error, allocating memory for MFI frame-pool");
710 		goto mpi2_undo_descriptor_pool;
711 	}
712 
713 
714 	/* Allocate MPI2 Message pool */
715 	/*
716 	 * Make sure the buffer is aligned to 256 for the raid message packet.
717 	 * Create an I/O request pool and assign one frame to each cmd.
718 	 */
719 
720 	if (create_mpi2_frame_pool(instance)) {
721 		cmn_err(CE_WARN,
722 		    "Error, allocating memory for MPI2 Message-pool");
723 		goto mpi2_undo_mfi_frame_pool;
724 	}
725 
726 #ifdef DEBUG
727 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 	    instance->max_sge_in_main_msg));
729 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 	    instance->max_sge_in_chain));
731 	con_log(CL_ANN1, (CE_CONT,
732 	    "[max_sge]0x%x", instance->max_num_sge));
733 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 	    instance->chain_offset_mpt_msg));
735 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 	    instance->chain_offset_io_req));
737 #endif
738 
739 
740 	/* Allocate additional dma buffer */
741 	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 		cmn_err(CE_WARN,
743 		    "Error, allocating tbolt additional DMA buffer");
744 		goto mpi2_undo_message_pool;
745 	}
746 
747 	return (DDI_SUCCESS);
748 
749 mpi2_undo_message_pool:
750 	destroy_mpi2_frame_pool(instance);
751 
752 mpi2_undo_mfi_frame_pool:
753 	destroy_mfi_frame_pool(instance);
754 
755 mpi2_undo_descriptor_pool:
756 	free_req_rep_desc_pool(instance);
757 
758 mpi2_undo_cmd_pool:
759 	mrsas_free_cmd_pool(instance);
760 
761 	return (DDI_FAILURE);
762 }
763 
764 
765 /*
766  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767  */
768 int
769 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
770 {
771 
772 	/*
773 	 * Reduce the max supported cmds by 1. This is to ensure that the
774 	 * reply_q_sz (1 more than the max cmd that driver may send)
775 	 * does not exceed max cmds that the FW can support
776 	 */
777 
778 	if (instance->max_fw_cmds > 1008)
779 		instance->max_fw_cmds = 1008;
780 	/* Reduce by 1 unconditionally, per the comment above. */
781 	instance->max_fw_cmds = instance->max_fw_cmds - 1;
782 
783 	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 	    " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 
786 
787 	/* create a pool of commands */
788 	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 		cmn_err(CE_WARN,
790 		    " alloc_space_for_mpi2() failed.");
791 
792 		return (DDI_FAILURE);
793 	}
794 
795 	/* Send ioc init message */
796 	/* NOTE: the issue_init call does FMA checking already. */
797 	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
798 		cmn_err(CE_WARN,
799 		    " mrsas_issue_init_mpi2() failed.");
800 
801 		goto fail_init_fusion;
802 	}
803 
804 	instance->unroll.alloc_space_mpi2 = 1;
805 
806 	con_log(CL_ANN, (CE_NOTE,
807 	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));
808 
809 	return (DDI_SUCCESS);
810 
811 fail_init_fusion:
812 	free_space_for_mpi2(instance);
813 
814 	return (DDI_FAILURE);
815 }
816 
817 
818 
819 /*
820  * init_mpi2
821  */
822 int
823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
824 {
825 	dma_obj_t init2_dma_obj;
826 	int ret_val = DDI_SUCCESS;
827 
828 	/* allocate DMA buffer for IOC INIT message */
829 	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
830 	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
831 	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
832 	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
833 	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
834 	init2_dma_obj.dma_attr.dma_attr_align = 256;
835 
836 	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
837 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
838 		cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
839 		    "could not allocate data transfer buffer.");
840 		return (DDI_FAILURE);
841 	}
842 	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
843 
844 	con_log(CL_ANN1, (CE_NOTE,
845 	    "mrsas_issue_init_mpi2 _phys adr: %x",
846 	    init2_dma_obj.dma_cookie[0].dmac_address));
847 
848 
849 	/* Initialize and send ioc init message */
850 	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
851 	if (ret_val == DDI_FAILURE) {
852 		con_log(CL_ANN1, (CE_WARN,
853 		    "mrsas_issue_init_mpi2: Failed"));
854 		goto fail_init_mpi2;
855 	}
856 
857 	/* free IOC init DMA buffer */
858 	if (mrsas_free_dma_obj(instance, init2_dma_obj)
859 	    != DDI_SUCCESS) {
860 		con_log(CL_ANN1, (CE_WARN,
861 		    "mrsas_issue_init_mpi2: Free Failed"));
862 		return (DDI_FAILURE);
863 	}
864 
865 	/* Get/Check and sync ld_map info */
866 	instance->map_id = 0;
867 	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
868 		(void) mrsas_tbolt_sync_map_info(instance);
869 
870 
871 	/* No mrsas_cmd to send, so send NULL. */
872 	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
873 		goto fail_init_mpi2;
874 
875 	con_log(CL_ANN, (CE_NOTE,
876 	    "mrsas_issue_init_mpi2: SUCCESSFUL"));
877 
878 	return (DDI_SUCCESS);
879 
880 fail_init_mpi2:
881 	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
882 
883 	return (DDI_FAILURE);
884 }
885 
886 static int
887 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
888 {
889 	int				numbytes;
890 	uint16_t			flags;
891 	struct mrsas_init_frame2	*mfiFrameInit2;
892 	struct mrsas_header		*frame_hdr;
893 	Mpi2IOCInitRequest_t		*init;
894 	struct mrsas_cmd		*cmd = NULL;
895 	struct mrsas_drv_ver		drv_ver_info;
896 	MRSAS_REQUEST_DESCRIPTOR_UNION	*req_desc;
897 
898 	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
899 
900 
901 #ifdef DEBUG
902 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
903 	    (int)sizeof (*mfiFrameInit2)));
904 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
905 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
906 	    (int)sizeof (struct mrsas_init_frame2)));
907 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
908 	    (int)sizeof (Mpi2IOCInitRequest_t)));
909 #endif
910 
911 	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
912 	numbytes = sizeof (*init);
913 	bzero(init, numbytes);
914 
915 	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
916 	    MPI2_FUNCTION_IOC_INIT);
917 
918 	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
919 	    MPI2_WHOINIT_HOST_DRIVER);
920 
921 	/* set MsgVersion and HeaderVersion host driver was built with */
922 	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
923 	    MPI2_VERSION);
924 
925 	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
926 	    MPI2_HEADER_VERSION);
927 
928 	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
929 	    instance->raid_io_msg_size / 4);
930 
931 	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
932 	    0);
933 
934 	ddi_put16(mpi2_dma_obj->acc_handle,
935 	    &init->ReplyDescriptorPostQueueDepth,
936 	    instance->reply_q_depth);
937 	/*
938 	 * These addresses are set using the DMA cookie addresses from when
939 	 * the memory was allocated.  The sense buffer high address should
940 	 * be 0, as set just below.
941 	 */
942 
943 	ddi_put32(mpi2_dma_obj->acc_handle,
944 	    &init->SenseBufferAddressHigh, 0);
945 
946 	ddi_put64(mpi2_dma_obj->acc_handle,
947 	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
948 	    instance->io_request_frames_phy);
949 
950 	ddi_put64(mpi2_dma_obj->acc_handle,
951 	    &init->ReplyDescriptorPostQueueAddress,
952 	    instance->reply_frame_pool_phy);
953 
954 	ddi_put64(mpi2_dma_obj->acc_handle,
955 	    &init->ReplyFreeQueueAddress, 0);
956 
957 	cmd = instance->cmd_list[0];
958 	if (cmd == NULL) {
959 		return (DDI_FAILURE);
960 	}
961 	cmd->retry_count_for_ocr = 0;
962 	cmd->pkt = NULL;
963 	cmd->drv_pkt_time = 0;
964 
965 	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
966 	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
967 
968 	frame_hdr = &cmd->frame->hdr;
969 
970 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
971 	    MFI_CMD_STATUS_POLL_MODE);
972 
973 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
974 
975 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
976 
977 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
978 
979 	con_log(CL_ANN, (CE_CONT,
980 	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
981 
982 	/* Init the MFI Header */
983 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
984 	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
985 
986 	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
987 
988 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
989 	    &mfiFrameInit2->cmd_status,
990 	    MFI_STAT_INVALID_STATUS);
991 
992 	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
993 
994 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
996 	    mpi2_dma_obj->dma_cookie[0].dmac_address);
997 
998 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 	    &mfiFrameInit2->data_xfer_len,
1000 	    sizeof (Mpi2IOCInitRequest_t));
1001 
1002 	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1003 	    (int)init->ReplyDescriptorPostQueueAddress));
1004 
1005 	/* fill driver version information */
1006 	fill_up_drv_ver(&drv_ver_info);
1007 
1008 	/* allocate the driver version data transfer buffer */
1009 	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1010 	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1011 	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1012 	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1013 	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1014 	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1015 
1016 	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1017 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1018 		cmn_err(CE_WARN,
1019 		    "fusion init: Could not allocate driver version buffer.");
1020 		return (DDI_FAILURE);
1021 	}
1022 	/* copy driver version to dma buffer */
1023 	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1024 	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1025 	    (uint8_t *)drv_ver_info.drv_ver,
1026 	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
1027 	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1028 
1029 	/* send driver version physical address to firmware */
1030 	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1031 	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1032 
1033 	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1034 	    mfiFrameInit2->queue_info_new_phys_addr_lo,
1035 	    (int)sizeof (Mpi2IOCInitRequest_t)));
1036 
1037 	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1038 
1039 	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1040 	    cmd->scsi_io_request_phys_addr,
1041 	    (int)sizeof (struct mrsas_init_frame2)));
1042 
1043 	/* disable interrupts before sending INIT2 frame */
1044 	instance->func_ptr->disable_intr(instance);
1045 
1046 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1047 	    instance->request_message_pool;
1048 	req_desc->Words = cmd->scsi_io_request_phys_addr;
1049 	req_desc->MFAIo.RequestFlags =
1050 	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
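	/*
	 * The IOC INIT frame goes out as an MFA (MFI pass-through)
	 * descriptor: Words carries the physical address of the message
	 * frame, which is 256-byte aligned so its low byte is free, and the
	 * MFAIo.RequestFlags view of the union overlays that low byte with
	 * the descriptor type.
	 */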
1051 
1052 	cmd->request_desc = req_desc;
1053 
1054 	/* issue the init frame */
1055 	instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1056 
1057 	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1058 	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1059 	    frame_hdr->cmd_status));
1060 
1061 	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1062 	    &mfiFrameInit2->cmd_status) == 0) {
1063 		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1064 	} else {
1065 		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1066 		mrsas_dump_reply_desc(instance);
1067 		goto fail_ioc_init;
1068 	}
1069 
1070 	mrsas_dump_reply_desc(instance);
1071 
1072 	instance->unroll.verBuff = 1;
1073 
1074 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1075 
1076 	return (DDI_SUCCESS);
1077 
1078 
1079 fail_ioc_init:
1080 
1081 	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1082 
1083 	return (DDI_FAILURE);
1084 }
1085 
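/*
 * wait_for_outstanding_poll_io
 *
 * Drain the reply queue until at most two commands (the ldsync and aen
 * commands, which stay outstanding by design) remain pending.  Returns 0
 * on success, 1 if I/Os are still outstanding after dump_io_wait_time
 * iterations of roughly 10 ms each.
 */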
1086 int
1087 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1088 {
1089 	int i;
1090 	uint32_t wait_time = dump_io_wait_time;
1091 	for (i = 0; i < wait_time; i++) {
1092 		/*
1093 		 * Check For Outstanding poll Commands
1094 		 * except ldsync command and aen command
1095 		 */
1096 		if (instance->fw_outstanding <= 2) {
1097 			break;
1098 		}
1099 		drv_usecwait(10*MILLISEC);
1100 		/* complete commands from reply queue */
1101 		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
1102 	}
1103 	if (instance->fw_outstanding > 2) {
1104 		return (1);
1105 	}
1106 	return (0);
1107 }
1108 /*
1109  * scsi_pkt handling
1110  *
1111  * Visible to the external world via the transport structure.
1112  */
1113 
1114 int
1115 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1116 {
1117 	struct mrsas_instance	*instance = ADDR2MR(ap);
1118 	struct scsa_cmd		*acmd = PKT2CMD(pkt);
1119 	struct mrsas_cmd	*cmd = NULL;
1120 	uchar_t			cmd_done = 0;
1121 
1122 	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1123 	if (instance->deadadapter == 1) {
1124 		cmn_err(CE_WARN,
1125 		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1126 		    "for IO, as the HBA doesnt take any more IOs");
1127 		if (pkt) {
1128 			pkt->pkt_reason		= CMD_DEV_GONE;
1129 			pkt->pkt_statistics	= STAT_DISCON;
1130 		}
1131 		return (TRAN_FATAL_ERROR);
1132 	}
1133 	if (instance->adapterresetinprogress) {
1134 		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1135 		    "returning mfi_pkt and setting TRAN_BUSY\n"));
1136 		return (TRAN_BUSY);
1137 	}
1138 	(void) mrsas_tbolt_prepare_pkt(acmd);
1139 
1140 	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1141 
1142 	/*
1143 	 * Check if the command was already completed by the
1144 	 * mrsas_tbolt_build_cmd() routine, in which case cmd_done is set
1145 	 * and an appropriate reason is provided in the pkt_reason field.
1146 	 */
1147 	if (cmd_done) {
1148 		pkt->pkt_reason = CMD_CMPLT;
1149 		pkt->pkt_scbp[0] = STATUS_GOOD;
1150 		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1151 		    | STATE_SENT_CMD;
1152 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1153 			(*pkt->pkt_comp)(pkt);
1154 		}
1155 
1156 		return (TRAN_ACCEPT);
1157 	}
1158 
1159 	if (cmd == NULL) {
1160 		return (TRAN_BUSY);
1161 	}
1162 
1163 
1164 	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1165 		if (instance->fw_outstanding > instance->max_fw_cmds) {
1166 			cmn_err(CE_WARN,
1167 			    "Command Queue Full... Returning BUSY");
1168 			return_raid_msg_pkt(instance, cmd);
1169 			return (TRAN_BUSY);
1170 		}
1171 
1172 		/* Synchronize the Cmd frame for the controller */
1173 		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1174 		    DDI_DMA_SYNC_FORDEV);
1175 
1176 		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1177 		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1178 		    cmd->index, cmd->SMID));
1179 
1180 		instance->func_ptr->issue_cmd(cmd, instance);
1181 	} else {
1182 		instance->func_ptr->issue_cmd(cmd, instance);
1183 		(void) wait_for_outstanding_poll_io(instance);
1184 		(void) mrsas_common_check(instance, cmd);
1185 	}
1186 
1187 	return (TRAN_ACCEPT);
1188 }
1189 
1190 /*
1191  * prepare the pkt:
1192  * the pkt may have been resubmitted or just reused so
1193  * initialize some fields and do some checks.
1194  */
1195 static int
1196 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1197 {
1198 	struct scsi_pkt	*pkt = CMD2PKT(acmd);
1199 
1200 
1201 	/*
1202 	 * Reinitialize some fields that need it; the packet may
1203 	 * have been resubmitted
1204 	 */
1205 	pkt->pkt_reason = CMD_CMPLT;
1206 	pkt->pkt_state = 0;
1207 	pkt->pkt_statistics = 0;
1208 	pkt->pkt_resid = 0;
1209 
1210 	/*
1211 	 * zero status byte.
1212 	 */
1213 	*(pkt->pkt_scbp) = 0;
1214 
1215 	return (0);
1216 }
1217 
1218 
1219 int
1220 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1221     struct scsa_cmd *acmd,
1222     struct mrsas_cmd *cmd,
1223     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1224     uint32_t *datalen)
1225 {
1226 	uint32_t		MaxSGEs;
1227 	int			sg_to_process;
1228 	uint32_t		i, j;
1229 	uint32_t		numElements, endElement;
1230 	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
1231 	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
1232 	ddi_acc_handle_t acc_handle =
1233 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1234 
1235 	con_log(CL_ANN1, (CE_NOTE,
1236 	    "chkpnt: Building Chained SGL :%d", __LINE__));
1237 
1238 	/* Calculate SGE size in number of words (32-bit) */
1239 	/* Clear the datalen before updating it. */
1240 	*datalen = 0;
1241 
1242 	MaxSGEs = instance->max_sge_in_main_msg;
1243 
1244 	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1245 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1246 
1247 	/* set data transfer flag. */
1248 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1249 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1250 		    MPI2_SCSIIO_CONTROL_WRITE);
1251 	} else {
1252 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1253 		    MPI2_SCSIIO_CONTROL_READ);
1254 	}
1255 
1256 
1257 	numElements = acmd->cmd_cookiecnt;
1258 
1259 	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1260 
1261 	if (numElements > instance->max_num_sge) {
1262 		con_log(CL_ANN, (CE_NOTE,
1263 		    "[Max SGE Count Exceeded]:%x", numElements));
1264 		return (numElements);
1265 	}
1266 
1267 	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1268 	    (uint8_t)numElements);
1269 
1270 	/* set end element in main message frame */
1271 	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1272 
1273 	/* prepare the scatter-gather list for the firmware */
1274 	scsi_raid_io_sgl_ieee =
1275 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1276 
1277 	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1278 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1279 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1280 
1281 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1282 	}
1283 
1284 	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1285 		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1286 		    acmd->cmd_dmacookies[i].dmac_laddress);
1287 
1288 		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1289 		    acmd->cmd_dmacookies[i].dmac_size);
1290 
1291 		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1292 
1293 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1294 			if (i == (numElements - 1)) {
1295 				ddi_put8(acc_handle,
1296 				    &scsi_raid_io_sgl_ieee->Flags,
1297 				    IEEE_SGE_FLAGS_END_OF_LIST);
1298 			}
1299 		}
1300 
1301 		*datalen += acmd->cmd_dmacookies[i].dmac_size;
1302 
1303 #ifdef DEBUG
1304 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1305 		    scsi_raid_io_sgl_ieee->Address));
1306 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1307 		    scsi_raid_io_sgl_ieee->Length));
1308 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1309 		    scsi_raid_io_sgl_ieee->Flags));
1310 #endif
1311 
1312 	}
1313 
1314 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
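	/*
	 * Chained SGL layout (a sketch of the logic below): when the cookie
	 * count does not fit in the main frame, the last main-frame SGE slot
	 * becomes a chain element pointing at the per-command chain buffer
	 * (cmd->sgl) carved out in create_mpi2_frame_pool():
	 *
	 *   main frame: [SGE 0] ... [SGE endElement-1] [chain element]
	 *                                                    |
	 *   cmd->sgl:   [SGE endElement] ... [SGE numElements-1]
	 */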
1315 
1316 	/* check if chained SGL required */
1317 	if (i < numElements) {
1318 
1319 		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1320 
1321 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1322 			uint16_t ioFlags =
1323 			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1324 
1325 			if ((ioFlags &
1326 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1327 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1328 				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1329 				    (U8)instance->chain_offset_io_req);
1330 			} else {
1331 				ddi_put8(acc_handle,
1332 				    &scsi_raid_io->ChainOffset, 0);
1333 			}
1334 		} else {
1335 			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1336 			    (U8)instance->chain_offset_io_req);
1337 		}
1338 
1339 		/* prepare physical chain element */
1340 		ieeeChainElement = scsi_raid_io_sgl_ieee;
1341 
1342 		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1343 
1344 		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1345 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1346 			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1347 		} else {
1348 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1349 			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1350 			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1351 		}
1352 
1353 		ddi_put32(acc_handle, &ieeeChainElement->Length,
1354 		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1355 
1356 		ddi_put64(acc_handle, &ieeeChainElement->Address,
1357 		    (U64)cmd->sgl_phys_addr);
1358 
1359 		sg_to_process = numElements - i;
1360 
1361 		con_log(CL_ANN1, (CE_NOTE,
1362 		    "[Additional SGE Count]:%x", endElement));
1363 
1364 		/* point to the chained SGL buffer */
1365 		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1366 
1367 		/* build rest of the SGL in chained buffer */
1368 		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1369 			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1370 
1371 			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1372 			    acmd->cmd_dmacookies[i].dmac_laddress);
1373 
1374 			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1375 			    acmd->cmd_dmacookies[i].dmac_size);
1376 
1377 			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1378 
1379 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1380 				if (i == (numElements - 1)) {
1381 					ddi_put8(acc_handle,
1382 					    &scsi_raid_io_sgl_ieee->Flags,
1383 					    IEEE_SGE_FLAGS_END_OF_LIST);
1384 				}
1385 			}
1386 
1387 			*datalen += acmd->cmd_dmacookies[i].dmac_size;
1388 
1389 #ifdef DEBUG
1390 			con_log(CL_DLEVEL1, (CE_NOTE,
1391 			    "[SGL Address]: %" PRIx64,
1392 			    scsi_raid_io_sgl_ieee->Address));
1393 			con_log(CL_DLEVEL1, (CE_NOTE,
1394 			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1395 			con_log(CL_DLEVEL1, (CE_NOTE,
1396 			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1397 #endif
1398 
1399 			i++;
1400 		}
1401 	}
1402 
1403 	return (0);
1404 } /* end of mr_sas_tbolt_build_sgl */
1405 
1406 
1407 /*
1408  * build_cmd
1409  */
1410 static struct mrsas_cmd *
1411 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1412     struct scsi_pkt *pkt, uchar_t *cmd_done)
1413 {
1414 	uint8_t		fp_possible = 0;
1415 	uint32_t	index;
1416 	uint32_t	lba_count = 0;
1417 	uint32_t	start_lba_hi = 0;
1418 	uint32_t	start_lba_lo = 0;
1419 	ddi_acc_handle_t acc_handle =
1420 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1421 	struct mrsas_cmd		*cmd = NULL;
1422 	struct scsa_cmd			*acmd = PKT2CMD(pkt);
1423 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
1424 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
1425 	uint32_t			datalen;
1426 	struct IO_REQUEST_INFO io_info;
1427 	MR_FW_RAID_MAP_ALL *local_map_ptr;
1428 	uint16_t pd_cmd_cdblen;
1429 
1430 	con_log(CL_DLEVEL1, (CE_NOTE,
1431 	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1432 
1433 	/* Find out if this is a logical or physical drive command. */
1434 	acmd->islogical = MRDRV_IS_LOGICAL(ap);
1435 	acmd->device_id = MAP_DEVICE_ID(instance, ap);
1436 
1437 	*cmd_done = 0;
1438 
1439 	/* get the command packet */
1440 	if (!(cmd = get_raid_msg_pkt(instance))) {
1441 		return (NULL);
1442 	}
1443 
1444 	index = cmd->index;
1445 	ReqDescUnion =	mr_sas_get_request_descriptor(instance, index);
1446 	ReqDescUnion->Words = 0;
1447 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1448 	ReqDescUnion->SCSIIO.RequestFlags =
1449 	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1450 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1451 
1452 
1453 	cmd->request_desc = ReqDescUnion;
1454 	cmd->pkt = pkt;
1455 	cmd->cmd = acmd;
1456 
1457 	/* Let's get the command direction. */
1458 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1459 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1460 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1461 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1462 			    DDI_DMA_SYNC_FORDEV);
1463 		}
1464 	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1465 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1466 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1467 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1468 			    DDI_DMA_SYNC_FORCPU);
1469 		}
1470 	} else {
1471 		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1472 	}
1473 
1474 
1475 	/* get SCSI_IO raid message frame pointer */
1476 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1477 
1478 	/* zero out SCSI_IO raid message frame */
1479 	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1480 
1481 	/* Set the ldTargetId set by BuildRaidContext() */
1482 	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1483 	    acmd->device_id);
1484 
1485 	/*  Copy CDB to scsi_io_request message frame */
1486 	ddi_rep_put8(acc_handle,
1487 	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1488 	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1489 
1490 	/*
1491 	 * Just the CDB length, rest of the Flags are zero
1492 	 * This will be modified later.
1493 	 */
1494 	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1495 
1496 	pd_cmd_cdblen = acmd->cmd_cdblen;
1497 
1498 	switch (pkt->pkt_cdbp[0]) {
1499 	case SCMD_READ:
1500 	case SCMD_WRITE:
1501 	case SCMD_READ_G1:
1502 	case SCMD_WRITE_G1:
1503 	case SCMD_READ_G4:
1504 	case SCMD_WRITE_G4:
1505 	case SCMD_READ_G5:
1506 	case SCMD_WRITE_G5:
1507 
1508 		if (acmd->islogical) {
1509 			/* Initialize sense information. */
1510 			if (cmd->sense1 == NULL)
1511 				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1512 				    "Sense buffer ptr NULL "));
1513 			else
1514 				bzero(cmd->sense1, SENSE_LENGTH);
1515 			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1516 			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1517 
1518 			if (acmd->cmd_cdblen == CDB_GROUP0) {
1519 				/* 6-byte cdb */
1520 				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1521 				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1522 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1523 				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1524 				    << 16));
1525 			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
1526 				/* 10-byte cdb */
1527 				lba_count =
1528 				    (((uint16_t)(pkt->pkt_cdbp[8])) |
1529 				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1530 
1531 				start_lba_lo =
1532 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1533 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1534 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1535 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1536 
1537 			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
1538 				/* 12-byte cdb */
1539 				lba_count = (
1540 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1541 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1542 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1543 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1544 
1545 				start_lba_lo =
1546 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1547 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1548 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1549 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1550 
1551 			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
1552 				/* 16-byte cdb */
1553 				lba_count = (
1554 				    ((uint32_t)(pkt->pkt_cdbp[13])) |
1555 				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1556 				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1557 				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1558 
1559 				start_lba_lo = (
1560 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1561 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1562 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1563 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1564 
1565 				start_lba_hi = (
1566 				    ((uint32_t)(pkt->pkt_cdbp[5])) |
1567 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1568 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1569 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1570 			}
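			/*
			 * Worked example of the decode above: a READ(10)
			 * whose CDB bytes 2..5 are 00 00 10 00 and bytes
			 * 7..8 are 00 08 yields start_lba_lo = 0x1000 and
			 * lba_count = 8.
			 */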
1571 
1572 			if (instance->tbolt &&
1573 			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1574 				cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1575 				    "controller limit 0x%x sectors",
1576 				    lba_count);
1577 			}
1578 
1579 			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1580 			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1581 			    start_lba_lo;
1582 			io_info.numBlocks = lba_count;
1583 			io_info.ldTgtId = acmd->device_id;
1584 
1585 			if (acmd->cmd_flags & CFLAG_DMASEND)
1586 				io_info.isRead = 0;
1587 			else
1588 				io_info.isRead = 1;
1589 
1590 
1591 			/* Acquire SYNC MAP UPDATE lock */
1592 			mutex_enter(&instance->sync_map_mtx);
1593 
1594 			local_map_ptr =
1595 			    instance->ld_map[(instance->map_id & 1)];
1596 
1597 			if ((MR_TargetIdToLdGet(
1598 			    acmd->device_id, local_map_ptr) >=
1599 			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1600 				cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1601 				    "targetId >= MAX_LOGICAL_DRIVES || "
1602 				    "!instance->fast_path_io");
1603 				fp_possible = 0;
1604 				/* Set Regionlock flags to BYPASS */
1605 				/* io_request->RaidContext.regLockFlags  = 0; */
1606 				ddi_put8(acc_handle,
1607 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1608 			} else {
1609 				if (MR_BuildRaidContext(instance, &io_info,
1610 				    &scsi_raid_io->RaidContext, local_map_ptr))
1611 					fp_possible = io_info.fpOkForIo;
1612 			}
1613 
1614 			if (!enable_fp)
1615 				fp_possible = 0;
1616 
1617 			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1618 			    "instance->fast_path_io %d fp_possible %d",
1619 			    enable_fp, instance->fast_path_io, fp_possible));
1620 
1621 		if (fp_possible) {
1622 
1623 			/* Check for DIF enabled LD */
1624 			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1625 				/* Prepare 32 Byte CDB for DIF capable Disk */
1626 				mrsas_tbolt_prepare_cdb(instance,
1627 				    scsi_raid_io->CDB.CDB32,
1628 				    &io_info, scsi_raid_io, start_lba_lo);
1629 			} else {
1630 				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1631 				    (uint8_t *)&pd_cmd_cdblen,
1632 				    io_info.pdBlock, io_info.numBlocks);
1633 				ddi_put16(acc_handle,
1634 				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1635 			}
1636 
1637 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1638 			    MPI2_FUNCTION_SCSI_IO_REQUEST);
1639 
1640 			ReqDescUnion->SCSIIO.RequestFlags =
1641 			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1642 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1643 
1644 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1645 				uint8_t regLockFlags = ddi_get8(acc_handle,
1646 				    &scsi_raid_io->RaidContext.regLockFlags);
1647 				uint16_t IoFlags = ddi_get16(acc_handle,
1648 				    &scsi_raid_io->IoFlags);
1649 
1650 				if (regLockFlags == REGION_TYPE_UNUSED)
1651 					ReqDescUnion->SCSIIO.RequestFlags =
1652 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1653 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1654 
1655 				IoFlags |=
1656 				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1657 				regLockFlags |=
1658 				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1659 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1660 
1661 				ddi_put8(acc_handle,
1662 				    &scsi_raid_io->ChainOffset, 0);
1663 				ddi_put8(acc_handle,
1664 				    &scsi_raid_io->RaidContext.nsegType,
1665 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1666 				    MPI2_TYPE_CUDA));
1667 				ddi_put8(acc_handle,
1668 				    &scsi_raid_io->RaidContext.regLockFlags,
1669 				    regLockFlags);
1670 				ddi_put16(acc_handle,
1671 				    &scsi_raid_io->IoFlags, IoFlags);
1672 			}
1673 
1674 			if ((instance->load_balance_info[
1675 			    acmd->device_id].loadBalanceFlag) &&
1676 			    (io_info.isRead)) {
1677 				io_info.devHandle =
1678 				    get_updated_dev_handle(&instance->
1679 				    load_balance_info[acmd->device_id],
1680 				    &io_info);
1681 				cmd->load_balance_flag |=
1682 				    MEGASAS_LOAD_BALANCE_FLAG;
1683 			} else {
1684 				cmd->load_balance_flag &=
1685 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1686 			}
1687 
1688 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1689 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1690 			    io_info.devHandle);
1691 
1692 		} else {
1693 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1694 			    MPI2_FUNCTION_LD_IO_REQUEST);
1695 
1696 			ddi_put16(acc_handle,
1697 			    &scsi_raid_io->DevHandle, acmd->device_id);
1698 
1699 			ReqDescUnion->SCSIIO.RequestFlags =
1700 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1701 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1702 
1703 			ddi_put16(acc_handle,
1704 			    &scsi_raid_io->RaidContext.timeoutValue,
1705 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1706 
1707 			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1708 				uint8_t regLockFlags = ddi_get8(acc_handle,
1709 				    &scsi_raid_io->RaidContext.regLockFlags);
1710 
1711 				if (regLockFlags == REGION_TYPE_UNUSED) {
1712 					ReqDescUnion->SCSIIO.RequestFlags =
1713 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1714 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1715 				}
1716 
1717 				regLockFlags |=
1718 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1719 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1720 
1721 				ddi_put8(acc_handle,
1722 				    &scsi_raid_io->RaidContext.nsegType,
1723 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1724 				    MPI2_TYPE_CUDA));
1725 				ddi_put8(acc_handle,
1726 				    &scsi_raid_io->RaidContext.regLockFlags,
1727 				    regLockFlags);
1728 			}
1729 		} /* Not FP */
1730 
1731 		/* Release SYNC MAP UPDATE lock */
1732 		mutex_exit(&instance->sync_map_mtx);
1733 
1734 
1735 		/*
1736 		 * Set sense buffer physical address/length in scsi_io_request.
1737 		 */
1738 		ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1739 		    cmd->sense_phys_addr1);
1740 		ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1741 		    SENSE_LENGTH);
1742 
1743 		/* Construct SGL */
1744 		ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1745 		    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1746 
1747 		(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1748 		    scsi_raid_io, &datalen);
1749 
1750 		ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1751 
1752 		break;
1753 #ifndef PDSUPPORT	/* if PDSUPPORT, skip break and fall through */
1754 	} else {
1755 		break;
1756 #endif
1757 	}
1758 	/* Fall through for all non-rd/wr cmds */
1759 	default:
1760 		switch (pkt->pkt_cdbp[0]) {
1761 		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1762 			return_raid_msg_pkt(instance, cmd);
1763 			*cmd_done = 1;
1764 			return (NULL);
1765 		}
1766 
1767 		case SCMD_MODE_SENSE:
1768 		case SCMD_MODE_SENSE_G1: {
1769 			union scsi_cdb	*cdbp;
1770 			uint16_t	page_code;
1771 
1772 			cdbp = (void *)pkt->pkt_cdbp;
1773 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1774 			switch (page_code) {
1775 			case 0x3:
1776 			case 0x4:
1777 				(void) mrsas_mode_sense_build(pkt);
1778 				return_raid_msg_pkt(instance, cmd);
1779 				*cmd_done = 1;
1780 				return (NULL);
1781 			}
1782 			break;
1783 		}
1784 
1785 		default: {
1786 			/*
1787 			 * Handle PASSTHRU commands here, e.g. Inquiry,
1788 			 * for both system PDs and logical devices.
1789 			 */
1790 
1791 			if (!(acmd->islogical)) {
1792 
1793 				/* Acquire SYNC MAP UPDATE lock */
1794 				mutex_enter(&instance->sync_map_mtx);
1795 
1796 				local_map_ptr =
1797 				    instance->ld_map[(instance->map_id & 1)];
1798 
1799 				ddi_put8(acc_handle, &scsi_raid_io->Function,
1800 				    MPI2_FUNCTION_SCSI_IO_REQUEST);
1801 
1802 				ReqDescUnion->SCSIIO.RequestFlags =
1803 				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1804 				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1805 
1806 				ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1807 				    local_map_ptr->raidMap.
1808 				    devHndlInfo[acmd->device_id].curDevHdl);
1809 
1810 
1811 				/* Set regLockFlags to REGION_TYPE_BYPASS */
1812 				ddi_put8(acc_handle,
1813 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1814 				ddi_put64(acc_handle,
1815 				    &scsi_raid_io->RaidContext.regLockRowLBA,
1816 				    0);
1817 				ddi_put32(acc_handle,
1818 				    &scsi_raid_io->RaidContext.regLockLength,
1819 				    0);
1820 				ddi_put8(acc_handle,
1821 				    &scsi_raid_io->RaidContext.RAIDFlags,
1822 				    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1823 				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1824 				ddi_put16(acc_handle,
1825 				    &scsi_raid_io->RaidContext.timeoutValue,
1826 				    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1827 				ddi_put16(acc_handle,
1828 				    &scsi_raid_io->RaidContext.ldTargetId,
1829 				    acmd->device_id);
1830 				ddi_put8(acc_handle,
1831 				    &scsi_raid_io->LUN[1], acmd->lun);
1832 
1833 				/* Release SYNC MAP UPDATE lock */
1834 				mutex_exit(&instance->sync_map_mtx);
1835 
1836 			} else {
1837 				ddi_put8(acc_handle, &scsi_raid_io->Function,
1838 				    MPI2_FUNCTION_LD_IO_REQUEST);
1839 				ddi_put8(acc_handle,
1840 				    &scsi_raid_io->LUN[1], acmd->lun);
1841 				ddi_put16(acc_handle,
1842 				    &scsi_raid_io->DevHandle, acmd->device_id);
1843 				ReqDescUnion->SCSIIO.RequestFlags =
1844 				    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1845 				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1846 			}
1847 
1848 			/*
1849 			 * Set sense buffer physical address/length in
1850 			 * scsi_io_request.
1851 			 */
1852 			ddi_put32(acc_handle,
1853 			    &scsi_raid_io->SenseBufferLowAddress,
1854 			    cmd->sense_phys_addr1);
1855 			ddi_put8(acc_handle,
1856 			    &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1857 
1858 			/* Construct SGL */
1859 			ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1860 			    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1861 
1862 			(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1863 			    scsi_raid_io, &datalen);
1864 
1865 			ddi_put32(acc_handle,
1866 			    &scsi_raid_io->DataLength, datalen);
1867 
1868 
1869 			con_log(CL_ANN, (CE_CONT,
1870 			    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1871 			    pkt->pkt_cdbp[0], acmd->device_id));
1872 			con_log(CL_DLEVEL1, (CE_CONT,
1873 			    "data length = %x\n",
1874 			    scsi_raid_io->DataLength));
1875 			con_log(CL_DLEVEL1, (CE_CONT,
1876 			    "cdb length = %x\n",
1877 			    acmd->cmd_cdblen));
1878 		}
1879 			break;
1880 		}
1881 
1882 	}
1883 
1884 	return (cmd);
1885 }
1886 
1887 uint32_t
1888 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1889 {
1890 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1891 }
1892 
1893 void
1894 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1895 {
1896 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1897 	atomic_add_16(&instance->fw_outstanding, 1);
1898 
1899 	struct scsi_pkt *pkt;
1900 
1901 	con_log(CL_ANN1,
1902 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1903 
1904 	con_log(CL_DLEVEL1, (CE_CONT,
1905 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1906 	con_log(CL_DLEVEL1, (CE_CONT,
1907 	    " [req desc low part] %x \n",
1908 	    (uint_t)(req_desc->Words & 0xffffffff)));
1909 	con_log(CL_DLEVEL1, (CE_CONT,
1910 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1911 	pkt = cmd->pkt;
1912 
1913 	if (pkt) {
1914 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1915 		    "issued cmd to FW: cmd %p instance %p pkt %p "
1916 		    "pkt_time %x\n",
1917 		    gethrtime(), (void *)cmd, (void *)instance,
1918 		    (void *)pkt, cmd->drv_pkt_time));
1919 		if (instance->adapterresetinprogress) {
1920 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1921 			con_log(CL_ANN, (CE_NOTE,
1922 			    "TBOLT Reset the scsi_pkt timer"));
1923 		} else {
1924 			push_pending_mfi_pkt(instance, cmd);
1925 		}
1926 
1927 	} else {
1928 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1929 		    "issued cmd to FW: cmd %p, instance %p "
1930 		    "(no pkt)\n", gethrtime(), (void *)cmd, (void *)instance));
1931 	}
1932 
1933 	/* Issue the command to the FW */
1934 	mutex_enter(&instance->reg_write_mtx);
1935 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1936 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1937 	mutex_exit(&instance->reg_write_mtx);
1938 }
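
/*
 * Layout note (illustrative, per the MPI2 request descriptor
 * definitions): the 64-bit req_desc->Words written above packs all of
 * the routing information for one command, roughly:
 *
 *	byte 0		RequestFlags (type << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT)
 *	byte 1		MSIxIndex
 *	bytes 2-3	SMID (1-based index into instance->cmd_list)
 *	bytes 4-5	LMID
 *	bytes 6-7	DevHandle (fast-path SCSI IO descriptors only)
 *
 * The two 32-bit queue-port writes must reach the chip as a pair,
 * which is why they are serialized under reg_write_mtx.
 */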
1939 
1940 /*
1941  * issue_cmd_in_sync_mode
1942  */
1943 int
1944 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1945     struct mrsas_cmd *cmd)
1946 {
1947 	int		i;
1948 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1949 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1950 
1951 	struct mrsas_header	*hdr;
1952 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1953 
1954 	con_log(CL_ANN,
1955 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1956 	    cmd->SMID));
1957 
1958 
1959 	if (instance->adapterresetinprogress) {
1960 		cmd->drv_pkt_time = ddi_get16(cmd->frame_dma_obj.acc_handle,
1961 		    &hdr->timeout);
1962 		if (cmd->drv_pkt_time < debug_timeout_g)
1963 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1964 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: "
1965 		    "RESET-IN-PROGRESS, issue cmd & return."));
1966 
1967 		mutex_enter(&instance->reg_write_mtx);
1968 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1969 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1970 		mutex_exit(&instance->reg_write_mtx);
1971 
1972 		return (DDI_SUCCESS);
1973 	} else {
1974 		con_log(CL_ANN1, (CE_NOTE,
1975 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1976 		push_pending_mfi_pkt(instance, cmd);
1977 	}
1978 
1979 	con_log(CL_DLEVEL2, (CE_NOTE,
1980 	    "HighQport offset :%p",
1981 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1982 	con_log(CL_DLEVEL2, (CE_NOTE,
1983 	    "LowQport offset :%p",
1984 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1985 
1986 	cmd->sync_cmd = MRSAS_TRUE;
1987 	cmd->cmd_status = ENODATA;
1988 
1989 
1990 	mutex_enter(&instance->reg_write_mtx);
1991 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1992 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1993 	mutex_exit(&instance->reg_write_mtx);
1994 
1995 	con_log(CL_ANN1, (CE_NOTE,
1996 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1997 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1998 	    (uint_t)(req_desc->Words & 0xffffffff)));
1999 
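	/*
	 * Note: cv_wait() has no timeout, so 'i' counts wakeups on
	 * int_cmd_cv rather than elapsed milliseconds; 'msecs' here
	 * bounds how many (possibly spurious) wakeups are tolerated
	 * before the command is declared failed.
	 */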
2000 	mutex_enter(&instance->int_cmd_mtx);
2001 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2002 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2003 	}
2004 	mutex_exit(&instance->int_cmd_mtx);
2005 
2006 
2007 	if (i < (msecs - 1)) {
2008 		return (DDI_SUCCESS);
2009 	} else {
2010 		return (DDI_FAILURE);
2011 	}
2012 }
2013 
2014 /*
2015  * issue_cmd_in_poll_mode
2016  */
2017 int
2018 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2019     struct mrsas_cmd *cmd)
2020 {
2021 	int		i;
2022 	uint16_t	flags;
2023 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2024 	struct mrsas_header *frame_hdr;
2025 
2026 	con_log(CL_ANN,
2027 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2028 	    cmd->SMID));
2029 
2030 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2031 
2032 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2033 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2034 	    MFI_CMD_STATUS_POLL_MODE);
2035 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2036 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2037 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2038 
2039 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2040 	    (uint_t)(req_desc->Words & 0xffffffff)));
2041 	con_log(CL_ANN1, (CE_NOTE,
2042 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2043 
2044 	/* issue the frame using inbound queue port */
2045 	mutex_enter(&instance->reg_write_mtx);
2046 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2047 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2048 	mutex_exit(&instance->reg_write_mtx);
2049 
2050 	for (i = 0; i < msecs && (
2051 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2052 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2053 		/* wait for cmd_status to change from 0xFF */
2054 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2055 	}
2056 
2057 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2058 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2059 		con_log(CL_ANN1, (CE_NOTE,
2060 		    " cmd failed %" PRIx64, (req_desc->Words)));
2061 		return (DDI_FAILURE);
2062 	}
2063 
2064 	return (DDI_SUCCESS);
2065 }
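
/*
 * Usage sketch (illustrative only): internal DCMDs in this file are
 * polled with the pattern below when interrupts may not be available,
 * e.g. during the init and abort paths (see mrsas_tbolt_get_ld_map_info()
 * and abort_syncmap_cmd()):
 *
 *	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
 *	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd))
 *		cmn_err(CE_WARN, "DCMD poll timed out");
 */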
2066 
2067 void
2068 tbolt_enable_intr(struct mrsas_instance *instance)
2069 {
2070 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2071 	/* writel(~0, &regs->outbound_intr_status); */
2072 	/* readl(&regs->outbound_intr_status); */
2073 
2074 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2075 
2076 	/* dummy read to force PCI flush */
2077 	(void) RD_OB_INTR_MASK(instance);
2078 
2079 }
2080 
2081 void
2082 tbolt_disable_intr(struct mrsas_instance *instance)
2083 {
2084 	uint32_t mask = 0xFFFFFFFF;
2085 
2086 	WR_OB_INTR_MASK(mask, instance);
2087 
2088 	/* Dummy read to force PCI flush */
2089 
2090 	(void) RD_OB_INTR_MASK(instance);
2091 }
2092 
2093 
2094 int
2095 tbolt_intr_ack(struct mrsas_instance *instance)
2096 {
2097 	uint32_t	status;
2098 
2099 	/* check if it is our interrupt */
2100 	status = RD_OB_INTR_STATUS(instance);
2101 	con_log(CL_ANN1, (CE_NOTE,
2102 	    "chkpnt: Entered tbolt_intr_ack status = 0x%x", status));
2103 
2104 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2105 		return (DDI_INTR_UNCLAIMED);
2106 	}
2107 
2108 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2109 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2110 		return (DDI_INTR_UNCLAIMED);
2111 	}
2112 
2113 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2114 		/* clear the interrupt by writing back the same value */
2115 		WR_OB_INTR_STATUS(status, instance);
2116 		/* dummy READ */
2117 		(void) RD_OB_INTR_STATUS(instance);
2118 	}
2119 	return (DDI_INTR_CLAIMED);
2120 }
2121 
2122 /*
2123  * get_raid_msg_pkt : Get a command from the free pool.
2124  * After successful allocation, the caller of this routine
2125  * must clear the frame buffer (bzero) before using the
2126  * packet further.
2127  *
2128  * ***** Note *****
2129  * After clearing the frame buffer, the context id of the
2130  * frame buffer must be restored.
2131  */
2132 
2133 struct mrsas_cmd *
2134 get_raid_msg_pkt(struct mrsas_instance *instance)
2135 {
2136 	mlist_t			*head = &instance->cmd_pool_list;
2137 	struct mrsas_cmd	*cmd = NULL;
2138 
2139 	mutex_enter(&instance->cmd_pool_mtx);
2140 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2141 
2142 
2143 	if (!mlist_empty(head)) {
2144 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2145 		mlist_del_init(head->next);
2146 	}
2147 	if (cmd != NULL) {
2148 		cmd->pkt = NULL;
2149 		cmd->retry_count_for_ocr = 0;
2150 		cmd->drv_pkt_time = 0;
2151 	}
2152 	mutex_exit(&instance->cmd_pool_mtx);
2153 
2154 	if (cmd != NULL)
2155 		bzero(cmd->scsi_io_request,
2156 		    sizeof (Mpi2RaidSCSIIORequest_t));
2157 	return (cmd);
2158 }
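
/*
 * Usage sketch (illustrative) of the clear-then-restore-context rule
 * described above, as followed by callers later in this file (see
 * mrsas_tbolt_sync_map_info()):
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle,
 *	    &cmd->frame->hdr.context, cmd->index);
 */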
2159 
2160 struct mrsas_cmd *
2161 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2162 {
2163 	mlist_t			*head = &instance->cmd_app_pool_list;
2164 	struct mrsas_cmd	*cmd = NULL;
2165 
2166 	mutex_enter(&instance->cmd_app_pool_mtx);
2167 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2168 
2169 	if (!mlist_empty(head)) {
2170 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2171 		mlist_del_init(head->next);
2172 	}
2173 	if (cmd != NULL) {
2174 		cmd->retry_count_for_ocr = 0;
2175 		cmd->drv_pkt_time = 0;
2176 		cmd->pkt = NULL;
2177 		cmd->request_desc = NULL;
2178 
2179 	}
2180 
2181 	mutex_exit(&instance->cmd_app_pool_mtx);
2182 
2183 	if (cmd != NULL) {
2184 		bzero(cmd->scsi_io_request,
2185 		    sizeof (Mpi2RaidSCSIIORequest_t));
2186 	}
2187 
2188 	return (cmd);
2189 }
2190 
2191 /*
2192  * return_raid_msg_pkt : Return a cmd to free command pool
2193  */
2194 void
2195 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2196 {
2197 	mutex_enter(&instance->cmd_pool_mtx);
2198 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2199 
2200 
2201 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2202 
2203 	mutex_exit(&instance->cmd_pool_mtx);
2204 }
2205 
2206 void
2207 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2208 {
2209 	mutex_enter(&instance->cmd_app_pool_mtx);
2210 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2211 
2212 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2213 
2214 	mutex_exit(&instance->cmd_app_pool_mtx);
2215 }
2216 
2217 
2218 void
2219 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2220     struct mrsas_cmd *cmd)
2221 {
2222 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2223 	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
2224 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
2225 	uint32_t			index;
2226 	ddi_acc_handle_t acc_handle =
2227 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2228 
2229 	if (!instance->tbolt) {
2230 		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2231 		return;
2232 	}
2233 
2234 	index = cmd->index;
2235 
2236 	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2237 
2238 	if (!ReqDescUnion) {
2239 		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2240 		return;
2241 	}
2242 
2243 	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2244 
2245 	ReqDescUnion->Words = 0;
2246 
2247 	ReqDescUnion->SCSIIO.RequestFlags =
2248 	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2249 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2250 
2251 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2252 
2253 	cmd->request_desc = ReqDescUnion;
2254 
2255 	/* get raid message frame pointer */
2256 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2257 
2258 	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2259 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2260 		    &scsi_raid_io->SGL.IeeeChain;
2261 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2262 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2263 	}
2264 
2265 	ddi_put8(acc_handle, &scsi_raid_io->Function,
2266 	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2267 
2268 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2269 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2270 
2271 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2272 	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2273 
2274 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2275 	    cmd->sense_phys_addr1);
2276 
2277 
2278 	scsi_raid_io_sgl_ieee =
2279 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2280 
2281 	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2282 	    (U64)cmd->frame_phys_addr);
2283 
2284 	ddi_put8(acc_handle,
2285 	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2286 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2287 	/* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2288 	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2289 
2290 	con_log(CL_ANN1, (CE_NOTE,
2291 	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
2292 	    scsi_raid_io_sgl_ieee->Address));
2293 	con_log(CL_ANN1, (CE_NOTE,
2294 	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2295 	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2296 	    scsi_raid_io_sgl_ieee->Flags));
2297 }
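
/*
 * Summary (illustrative): the routine above tunnels a legacy MFI frame
 * through the MPI2 queue.  The MPI2 request is a PASSTHRU whose single
 * IEEE chain SGE points at the DMA address of the MFI frame, so the
 * firmware fetches the MFI command itself:
 *
 *	MPI2 SCSI IO request (SMID = cmd->SMID)
 *	    Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST
 *	    SGL[0]: chain element -> cmd->frame_phys_addr (MFI frame)
 */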
2298 
2299 
2300 void
2301 tbolt_complete_cmd(struct mrsas_instance *instance,
2302     struct mrsas_cmd *cmd)
2303 {
2304 	uint8_t				status;
2305 	uint8_t				extStatus;
2306 	uint8_t				arm;
2307 	struct scsa_cmd			*acmd;
2308 	struct scsi_pkt			*pkt;
2309 	struct scsi_arq_status		*arqstat;
2310 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2311 	LD_LOAD_BALANCE_INFO		*lbinfo;
2312 	ddi_acc_handle_t acc_handle =
2313 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2314 
2315 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2316 
2317 	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2318 	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2319 
2320 	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2321 	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2322 
2323 	if (status != MFI_STAT_OK) {
2324 		con_log(CL_ANN, (CE_WARN,
2325 		    "IO Cmd Failed SMID %x", cmd->SMID));
2326 	} else {
2327 		con_log(CL_ANN, (CE_NOTE,
2328 		    "IO Cmd Success  SMID %x", cmd->SMID));
2329 	}
2330 
2331 	/* regular commands */
2332 
2333 	switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2334 
2335 	case MPI2_FUNCTION_SCSI_IO_REQUEST:  /* Fast Path IO. */
2336 		acmd =	(struct scsa_cmd *)cmd->cmd;
2337 		lbinfo = &instance->load_balance_info[acmd->device_id];
2338 
2339 		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2340 			arm = lbinfo->raid1DevHandle[0] ==
2341 			    scsi_raid_io->DevHandle ? 0 : 1;
2342 
2343 			lbinfo->scsi_pending_cmds[arm]--;
2344 			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2345 		}
2346 		con_log(CL_DLEVEL3, (CE_NOTE,
2347 		    "FastPath IO Completion Success "));
2348 		/* FALLTHRU */
2349 
2350 	case MPI2_FUNCTION_LD_IO_REQUEST: { /* Regular Path IO. */
2351 		acmd =	(struct scsa_cmd *)cmd->cmd;
2352 		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);
2353 
2354 		if (acmd->cmd_flags & CFLAG_DMAVALID) {
2355 			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2356 				(void) ddi_dma_sync(acmd->cmd_dmahandle,
2357 				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
2358 				    DDI_DMA_SYNC_FORCPU);
2359 			}
2360 		}
2361 
2362 		pkt->pkt_reason		= CMD_CMPLT;
2363 		pkt->pkt_statistics	= 0;
2364 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2365 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2366 
2367 		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2368 		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2369 		    ((acmd->islogical) ? "LD" : "PD"),
2370 		    acmd->cmd_dmacount, cmd->SMID, status));
2371 
2372 		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2373 			struct scsi_inquiry	*inq;
2374 
2375 			if (acmd->cmd_dmacount != 0) {
2376 				bp_mapin(acmd->cmd_buf);
2377 				inq = (struct scsi_inquiry *)
2378 				    acmd->cmd_buf->b_un.b_addr;
2379 
2380 				/* don't expose physical drives to OS */
2381 				if (acmd->islogical &&
2382 				    (status == MFI_STAT_OK)) {
2383 					display_scsi_inquiry((caddr_t)inq);
2384 #ifdef PDSUPPORT
2385 				} else if ((status == MFI_STAT_OK) &&
2386 				    inq->inq_dtype == DTYPE_DIRECT) {
2387 					display_scsi_inquiry((caddr_t)inq);
2388 #endif
2389 				} else {
2390 					/* for physical disk */
2391 					status = MFI_STAT_DEVICE_NOT_FOUND;
2392 				}
2393 			}
2394 		}
2395 
2396 		switch (status) {
2397 		case MFI_STAT_OK:
2398 			pkt->pkt_scbp[0] = STATUS_GOOD;
2399 			break;
2400 		case MFI_STAT_LD_CC_IN_PROGRESS:
2401 		case MFI_STAT_LD_RECON_IN_PROGRESS:
2402 			pkt->pkt_scbp[0] = STATUS_GOOD;
2403 			break;
2404 		case MFI_STAT_LD_INIT_IN_PROGRESS:
2405 			pkt->pkt_reason	= CMD_TRAN_ERR;
2406 			break;
2407 		case MFI_STAT_SCSI_IO_FAILED:
2408 			cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2409 			pkt->pkt_reason	= CMD_TRAN_ERR;
2410 			break;
2411 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 			con_log(CL_ANN, (CE_WARN,
2413 			    "tbolt_complete_cmd: scsi_done with error"));
2414 
2415 			pkt->pkt_reason	= CMD_CMPLT;
2416 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2417 
2418 			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 				con_log(CL_ANN,
2420 				    (CE_WARN, "TEST_UNIT_READY fail"));
2421 			} else {
2422 				pkt->pkt_state |= STATE_ARQ_DONE;
2423 				arqstat = (void *)(pkt->pkt_scbp);
2424 				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 				arqstat->sts_rqpkt_resid = 0;
2426 				arqstat->sts_rqpkt_state |=
2427 				    STATE_GOT_BUS | STATE_GOT_TARGET
2428 				    | STATE_SENT_CMD
2429 				    | STATE_XFERRED_DATA;
2430 				*(uint8_t *)&arqstat->sts_rqpkt_status =
2431 				    STATUS_GOOD;
2432 				con_log(CL_ANN1,
2433 				    (CE_NOTE, "Copying Sense data %x",
2434 				    cmd->SMID));
2435 
2436 				ddi_rep_get8(acc_handle,
2437 				    (uint8_t *)&(arqstat->sts_sensedata),
2438 				    cmd->sense1,
2439 				    sizeof (struct scsi_extended_sense),
2440 				    DDI_DEV_AUTOINCR);
2441 
2442 			}
2443 			break;
2444 		case MFI_STAT_LD_OFFLINE:
2445 			cmn_err(CE_WARN,
2446 			    "tbolt_complete_cmd: ld offline "
2447 			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2449 			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2451 			    ddi_get16(acc_handle,
2452 			    &scsi_raid_io->RaidContext.ldTargetId),
2454 			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2455 
2456 			pkt->pkt_reason	= CMD_DEV_GONE;
2457 			pkt->pkt_statistics  = STAT_DISCON;
2458 			break;
2459 		case MFI_STAT_DEVICE_NOT_FOUND:
2460 			con_log(CL_ANN, (CE_CONT,
2461 			    "tbolt_complete_cmd: device not found error"));
2462 			pkt->pkt_reason	= CMD_DEV_GONE;
2463 			pkt->pkt_statistics  = STAT_DISCON;
2464 			break;
2465 
2466 		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 			pkt->pkt_state |= STATE_ARQ_DONE;
2468 			pkt->pkt_reason	= CMD_CMPLT;
2469 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2470 
2471 			arqstat = (void *)(pkt->pkt_scbp);
2472 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 			arqstat->sts_rqpkt_resid = 0;
2474 			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 			    | STATE_GOT_TARGET | STATE_SENT_CMD
2476 			    | STATE_XFERRED_DATA;
2477 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2478 
2479 			arqstat->sts_sensedata.es_valid = 1;
2480 			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2482 
2483 			/*
2484 			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 			 * ASC: 0x21h; ASCQ: 0x00h;
2486 			 */
2487 			arqstat->sts_sensedata.es_add_code = 0x21;
2488 			arqstat->sts_sensedata.es_qual_code = 0x00;
2489 			break;
2490 		case MFI_STAT_INVALID_CMD:
2491 		case MFI_STAT_INVALID_DCMD:
2492 		case MFI_STAT_INVALID_PARAMETER:
2493 		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 		default:
2495 			cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2496 			pkt->pkt_reason	= CMD_TRAN_ERR;
2497 
2498 			break;
2499 		}
2500 
2501 		atomic_add_16(&instance->fw_outstanding, (-1));
2502 
2503 		(void) mrsas_common_check(instance, cmd);
2504 		if (acmd->cmd_dmahandle) {
2505 			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2506 			    DDI_SUCCESS) {
2507 				ddi_fm_service_impact(instance->dip,
2508 				    DDI_SERVICE_UNAFFECTED);
2509 				pkt->pkt_reason = CMD_TRAN_ERR;
2510 				pkt->pkt_statistics = 0;
2511 			}
2512 		}
2513 
2514 		/* Call the callback routine */
2515 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2516 			(*pkt->pkt_comp)(pkt);
2517 
2518 		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2519 
2520 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2521 
2522 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2523 
2524 		return_raid_msg_pkt(instance, cmd);
2525 		break;
2526 	}
2527 	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */
2528 
2529 		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2530 		    cmd->frame->dcmd.mbox.b[1] == 1) {
2531 
2532 			mutex_enter(&instance->sync_map_mtx);
2533 
2534 			con_log(CL_ANN, (CE_NOTE,
2535 			    "LDMAP sync command SMID RECEIVED 0x%X",
2536 			    cmd->SMID));
2537 			if (cmd->frame->hdr.cmd_status != 0) {
2538 				cmn_err(CE_WARN,
2539 				    "map sync failed, status = 0x%x.",
2540 				    cmd->frame->hdr.cmd_status);
2541 			} else {
2542 				instance->map_id++;
2543 				cmn_err(CE_NOTE,
2544 				    "map sync received, switched map_id to %"
2545 				    PRIu64 "\n", instance->map_id);
2546 			}
2547 
2548 			if (MR_ValidateMapInfo(instance->ld_map[
2549 			    (instance->map_id & 1)],
2550 			    instance->load_balance_info)) {
2551 				instance->fast_path_io = 1;
2552 			} else {
2553 				instance->fast_path_io = 0;
2554 			}
2555 
2556 			con_log(CL_ANN, (CE_NOTE,
2557 			    "instance->fast_path_io %d",
2558 			    instance->fast_path_io));
2559 
2560 			instance->unroll.syncCmd = 0;
2561 
2562 			if (instance->map_update_cmd == cmd) {
2563 				return_raid_msg_pkt(instance, cmd);
2564 				atomic_add_16(&instance->fw_outstanding, (-1));
2565 				(void) mrsas_tbolt_sync_map_info(instance);
2566 			}
2567 
2568 			cmn_err(CE_NOTE, "LDMAP sync completed.");
2569 			mutex_exit(&instance->sync_map_mtx);
2570 			break;
2571 		}
2572 
2573 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2574 			con_log(CL_ANN1, (CE_CONT,
2575 			    "AEN command SMID RECEIVED 0x%X",
2576 			    cmd->SMID));
2577 			if ((instance->aen_cmd == cmd) &&
2578 			    (instance->aen_cmd->abort_aen)) {
2579 				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2580 				    "aborted_aen returned"));
2581 			} else {
2582 				atomic_add_16(&instance->fw_outstanding, (-1));
2583 				service_mfi_aen(instance, cmd);
2584 			}
2585 		}
2586 
2587 		if (cmd->sync_cmd == MRSAS_TRUE) {
2588 			con_log(CL_ANN1, (CE_CONT,
2589 			    "Sync-mode Command Response SMID RECEIVED 0x%X",
2590 			    cmd->SMID));
2591 
2592 			tbolt_complete_cmd_in_sync_mode(instance, cmd);
2593 		} else {
2594 			con_log(CL_ANN, (CE_CONT,
2595 			    "tbolt_complete_cmd: stray MFI completion 0x%X",
2596 			    cmd->SMID));
2597 		}
2598 		break;
2599 	default:
2600 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2601 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2602 
2603 		/* free message */
2604 		con_log(CL_ANN,
2605 		    (CE_NOTE, "tbolt_complete_cmd: unknown function type"));
2606 		break;
2607 	}
2608 }
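
/*
 * Informal recap of the status mapping performed above (MFI status as
 * reported by the RAID context vs. the resulting scsi_pkt state):
 *
 *	MFI_STAT_OK, CC/RECON in progress	CMD_CMPLT, STATUS_GOOD
 *	MFI_STAT_SCSI_DONE_WITH_ERROR		CMD_CMPLT, check cond. + ARQ
 *	MFI_STAT_LD_OFFLINE/DEVICE_NOT_FOUND	CMD_DEV_GONE, STAT_DISCON
 *	MFI_STAT_LD_LBA_OUT_OF_RANGE		CMD_CMPLT, ILLEGAL REQUEST
 *						(ASC 0x21, ASCQ 0x00)
 *	anything else				CMD_TRAN_ERR
 */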
2609 
2610 uint_t
2611 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2612 {
2613 	uint8_t				replyType;
2614 	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2615 	Mpi2ReplyDescriptorsUnion_t	*desc;
2616 	uint16_t			smid;
2617 	union desc_value		d_val;
2618 	struct mrsas_cmd		*cmd;
2619 
2620 	struct mrsas_header	*hdr;
2621 	struct scsi_pkt		*pkt;
2622 
2623 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2624 	    0, 0, DDI_DMA_SYNC_FORDEV);
2625 
2626 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2627 	    0, 0, DDI_DMA_SYNC_FORCPU);
2628 
2629 	desc = instance->reply_frame_pool;
2630 	desc += instance->reply_read_index;
2631 
2632 	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2633 	replyType = replyDesc->ReplyFlags &
2634 	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2635 
2636 	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2637 		return (DDI_INTR_UNCLAIMED);
2638 
2639 	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2640 	    != DDI_SUCCESS) {
2641 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2642 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2643 		con_log(CL_ANN1,
2644 		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2645 		    "FMA check failed, returning DDI_INTR_CLAIMED"));
2646 		return (DDI_INTR_CLAIMED);
2647 	}
2648 
2649 	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
2650 	    (void *)desc, desc->Words));
2651 
2652 	d_val.word = desc->Words;
2653 
2654 
2655 	/* Read Reply descriptor */
2656 	while ((d_val.u1.low != 0xffffffff) &&
2657 	    (d_val.u1.high != 0xffffffff)) {
2658 
2659 		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2660 		    0, 0, DDI_DMA_SYNC_FORCPU);
2661 
2662 		smid = replyDesc->SMID;
2663 
2664 		if (!smid || smid > instance->max_fw_cmds + 1) {
2665 			con_log(CL_ANN1, (CE_NOTE,
2666 			    "Reply Desc at Break  = %p	Words = %" PRIx64,
2667 			    (void *)desc, desc->Words));
2668 			break;
2669 		}
2670 
2671 		cmd	= instance->cmd_list[smid - 1];
2672 		if (!cmd) {
2673 			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2674 			    "outstanding_cmd: Invalid command "
2675 			    "or poll command received in completion path"));
2676 		} else {
2677 			mutex_enter(&instance->cmd_pend_mtx);
2678 			if (cmd->sync_cmd == MRSAS_TRUE) {
2679 				hdr = (struct mrsas_header *)&cmd->frame->hdr;
2680 				if (hdr) {
2681 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2682 					    "tbolt_process_outstanding_cmd:"
2683 					    " mlist_del_init(&cmd->list)."));
2684 					mlist_del_init(&cmd->list);
2685 				}
2686 			} else {
2687 				pkt = cmd->pkt;
2688 				if (pkt) {
2689 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2690 					    "tbolt_process_outstanding_cmd:"
2691 					    "mlist_del_init(&cmd->list)."));
2692 					mlist_del_init(&cmd->list);
2693 				}
2694 			}
2695 
2696 			mutex_exit(&instance->cmd_pend_mtx);
2697 
2698 			tbolt_complete_cmd(instance, cmd);
2699 		}
2700 		/* set it back to all 1s. */
2701 		desc->Words = -1LL;
2702 
2703 		instance->reply_read_index++;
2704 
2705 		if (instance->reply_read_index >= (instance->reply_q_depth)) {
2706 			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2707 			instance->reply_read_index = 0;
2708 		}
2709 
2710 		/* Get the next reply descriptor */
2711 		if (!instance->reply_read_index)
2712 			desc = instance->reply_frame_pool;
2713 		else
2714 			desc++;
2715 
2716 		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2717 
2718 		d_val.word = desc->Words;
2719 
2720 		con_log(CL_ANN1, (CE_NOTE,
2721 		    "Next Reply Desc  = %p Words = %" PRIx64,
2722 		    (void *)desc, desc->Words));
2723 
2724 		replyType = replyDesc->ReplyFlags &
2725 		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2726 
2727 		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2728 			break;
2729 
2730 	} /* End of while loop. */
2731 
2732 	/* update replyIndex to FW */
2733 	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2734 
2735 
2736 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2737 	    0, 0, DDI_DMA_SYNC_FORDEV);
2738 
2739 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2740 	    0, 0, DDI_DMA_SYNC_FORCPU);
2741 	return (DDI_INTR_CLAIMED);
2742 }
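
/*
 * Reply-ring note (illustrative): the loop above consumes descriptors
 * until it reaches one still holding the reset pattern (all 1s).  Each
 * consumed slot is rewritten to ~0 so that, after the ring wraps,
 * firmware-unwritten entries are still recognizable:
 *
 *	[ done ][ done ][ 0xFF...F ][ ... ]
 *	                 ^ reply_read_index stops here and is then
 *	                   posted via WR_MPI2_REPLY_POST_INDEX().
 */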
2743 
2744 
2745 
2746 
2747 /*
2748  * tbolt_complete_cmd_in_sync_mode -	Completes an internal command
2749  * @instance:			Adapter soft state
2750  * @cmd:			Command to be completed
2751  *
2752  * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2753  * complete after it issues the command. This function wakes up that
2754  * waiting routine by calling cv_broadcast() on int_cmd_cv.
2755  */
2756 void
2757 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2758     struct mrsas_cmd *cmd)
2759 {
2760 
2761 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2762 	    &cmd->frame->io.cmd_status);
2763 
2764 	cmd->sync_cmd = MRSAS_FALSE;
2765 
2766 	mutex_enter(&instance->int_cmd_mtx);
2767 	if (cmd->cmd_status == ENODATA) {
2768 		cmd->cmd_status = 0;
2769 	}
2770 	cv_broadcast(&instance->int_cmd_cv);
2771 	mutex_exit(&instance->int_cmd_mtx);
2772 
2773 }
2774 
2775 /*
2776  * mrsas_tbolt_get_ld_map_info -	Returns FW's ld_map structure
2777  * @instance:				Adapter soft state
2778  *
2779  * Issues an internal command (DCMD) to fetch the FW's logical drive
2780  * (RAID) map structure.  The map is validated and used to decide
2781  * whether fast-path IO can be enabled.
2782  */
2783 int
2784 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2785 {
2786 	int ret = 0;
2787 	struct mrsas_cmd	*cmd = NULL;
2788 	struct mrsas_dcmd_frame	*dcmd;
2789 	MR_FW_RAID_MAP_ALL *ci;
2790 	uint32_t ci_h = 0;
2791 	U32 size_map_info;
2792 
2793 	cmd = get_raid_msg_pkt(instance);
2794 
2795 	if (cmd == NULL) {
2796 		cmn_err(CE_WARN,
2797 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2798 		return (DDI_FAILURE);
2799 	}
2800 
2801 	dcmd = &cmd->frame->dcmd;
2802 
2803 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2804 	    (sizeof (MR_LD_SPAN_MAP) *
2805 	    (MAX_LOGICAL_DRIVES - 1));
2806 
2807 	con_log(CL_ANN, (CE_NOTE,
2808 	    "size_map_info : 0x%x", size_map_info));
2809 
2810 	ci = instance->ld_map[(instance->map_id & 1)];
2811 	ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2812 
2813 	if (!ci) {
2814 		cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2815 		return_raid_msg_pkt(instance, cmd);
2816 		return (-1);
2817 	}
2818 
2819 	bzero(ci, sizeof (*ci));
2820 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2821 
2822 	dcmd->cmd = MFI_CMD_OP_DCMD;
2823 	dcmd->cmd_status = 0xFF;
2824 	dcmd->sge_count = 1;
2825 	dcmd->flags = MFI_FRAME_DIR_READ;
2826 	dcmd->timeout = 0;
2827 	dcmd->pad_0 = 0;
2828 	dcmd->data_xfer_len = size_map_info;
2829 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2830 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2831 	dcmd->sgl.sge32[0].length = size_map_info;
2832 
2833 
2834 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2835 
2836 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2837 		ret = 0;
2838 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2839 	} else {
2840 		cmn_err(CE_WARN, "Get LD Map Info failed");
2841 		ret = -1;
2842 	}
2843 
2844 	return_raid_msg_pkt(instance, cmd);
2845 
2846 	return (ret);
2847 }
2848 
2849 void
2850 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2851 {
2852 	uint32_t i;
2853 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2854 	union desc_value d_val;
2855 
2856 	reply_desc = instance->reply_frame_pool;
2857 
2858 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2859 		d_val.word = reply_desc->Words;
2860 		con_log(CL_DLEVEL3, (CE_NOTE,
2861 		    "i=%d, %x:%x",
2862 		    i, d_val.u1.high, d_val.u1.low));
2863 	}
2864 }
2865 
2866 /*
2867  * mrsas_tbolt_prepare_cdb -	Prepare a 32-byte CDB for fast path.
2868  * @io_info:	MegaRAID IO request packet pointer.
2869  * @ref_tag:	Reference tag for RD/WRPROTECT
2870  *
2871  * Builds the 32-byte variable-length CDB and EEDP fields for DIF IO.
2872  */
2873 void
2874 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2875     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2876     U32 ref_tag)
2877 {
2878 	uint16_t		EEDPFlags;
2879 	uint32_t		Control;
2880 	ddi_acc_handle_t acc_handle =
2881 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2882 
2883 	/* Prepare 32-byte CDB if DIF is supported on this device */
2884 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2885 
2886 	bzero(cdb, 32);
2887 
2888 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2889 
2890 
2891 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2892 
2893 	if (io_info->isRead)
2894 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2895 	else
2896 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2897 
2898 	/* As in the Linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2899 	cdb[10] = MRSAS_RD_WR_PROTECT;
2900 
2901 	/* LOGICAL BLOCK ADDRESS */
2902 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2903 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2904 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2905 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2906 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2907 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2908 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2909 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2910 
2911 	/* Logical block reference tag */
2912 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2913 	    BE_32(ref_tag));
2914 
2915 	ddi_put16(acc_handle,
2916 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2917 
2918 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2919 	    ((io_info->numBlocks) * 512));
2920 	/* Specify 32-byte cdb */
2921 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2922 
2923 	/* Transfer length */
2924 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2925 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2926 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2927 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2928 
2929 	/* set SCSI IO EEDPFlags */
2930 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2931 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2932 
2933 	/* set SCSI IO EEDPFlags bits */
2934 	if (io_info->isRead) {
2935 		/*
2936 		 * For READ commands, the EEDPFlags shall be set to specify to
2937 		 * Increment the Primary Reference Tag, to Check the Reference
2938 		 * Tag, and to Check and Remove the Protection Information
2939 		 * fields.
2940 		 */
2941 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2942 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2943 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2944 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2945 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2946 	} else {
2947 		/*
2948 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2949 		 * Increment the Primary Reference Tag, and to Insert
2950 		 * Protection Information fields.
2951 		 */
2952 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2953 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2954 	}
2955 	Control |= (0x4 << 26);
2956 
2957 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2958 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2959 	ddi_put32(acc_handle,
2960 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2961 }
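
/*
 * Informal byte map of the 32-byte variable-length CDB built above:
 *
 *	cdb[0]		MRSAS_SCSI_VARIABLE_LENGTH_CMD
 *	cdb[7]		MRSAS_SCSI_ADDL_CDB_LEN
 *	cdb[9]		service action (READ32/WRITE32)
 *	cdb[10]		RD/WRPROTECT
 *	cdb[12..19]	64-bit LBA, big-endian
 *	cdb[28..31]	32-bit transfer length, big-endian
 *
 * The expected initial reference tag and the application tag mask are
 * carried in the MPI2 EEDP32 CDB union rather than in the CDB bytes.
 */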
2962 
2963 
2964 /*
2965  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2966  * @cdb:		CDB
2967  * @cdb_len:		cdb length
2968  * @start_blk:		Start block of IO
2969  *
2970  * Used to set the PD LBA in CDB for FP IOs
2971  */
2972 static void
2973 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2974     U32 num_blocks)
2975 {
2976 	U8 cdb_len = *cdb_len_ptr;
2977 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2978 
2979 	/* Some drives don't support 16/12-byte CDBs; convert to 10-byte. */
2980 	if (((cdb_len == 12) || (cdb_len == 16)) &&
2981 	    (start_blk <= 0xffffffff)) {
2982 		if (cdb_len == 16) {
2983 			con_log(CL_ANN,
2984 			    (CE_NOTE, "Convert READ/WRITE(16) to 10-byte CDB"));
2985 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2986 			flagvals = cdb[1];
2987 			groupnum = cdb[14];
2988 			control = cdb[15];
2989 		} else {
2990 			con_log(CL_ANN,
2991 			    (CE_NOTE, "Convert READ/WRITE(12) to 10-byte CDB"));
2992 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2993 			flagvals = cdb[1];
2994 			groupnum = cdb[10];
2995 			control = cdb[11];
2996 		}
2997 
2998 		bzero(cdb, 16);	/* sizeof (cdb) is just the pointer size */
2999 
3000 		cdb[0] = opcode;
3001 		cdb[1] = flagvals;
3002 		cdb[6] = groupnum;
3003 		cdb[9] = control;
3004 		/* Set transfer length */
3005 		cdb[8] = (U8)(num_blocks & 0xff);
3006 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3007 		cdb_len = 10;
3008 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3009 		/* Convert to 16-byte CDB for large LBAs */
3010 		con_log(CL_ANN,
3011 		    (CE_NOTE, "Converting 6/10/12-byte CDB to 16-byte CDB"));
3012 		switch (cdb_len) {
3013 		case 6:
3014 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3015 			control = cdb[5];
3016 			break;
3017 		case 10:
3018 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3019 			flagvals = cdb[1];
3020 			groupnum = cdb[6];
3021 			control = cdb[9];
3022 			break;
3023 		case 12:
3024 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3025 			flagvals = cdb[1];
3026 			groupnum = cdb[10];
3027 			control = cdb[11];
3028 			break;
3029 		}
3030 
3031 		bzero(cdb, 16);
3032 
3033 		cdb[0] = opcode;
3034 		cdb[1] = flagvals;
3035 		cdb[14] = groupnum;
3036 		cdb[15] = control;
3037 
3038 		/* Transfer length */
3039 		cdb[13] = (U8)(num_blocks & 0xff);
3040 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3041 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3042 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3043 
3044 		/* Specify 16-byte cdb */
3045 		cdb_len = 16;
3046 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3047 		/* convert to 10 byte CDB */
3048 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3049 		control = cdb[5];
3050 
3051 		bzero(cdb, 16);
3052 		cdb[0] = opcode;
3053 		cdb[9] = control;
3054 
3055 		/* Set transfer length */
3056 		cdb[8] = (U8)(num_blocks & 0xff);
3057 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3058 
3059 		/* Specify 10-byte cdb */
3060 		cdb_len = 10;
3061 	}
3062 
3063 
3064 	/* Fall through: normal case, just load the LBA here. */
3065 	switch (cdb_len) {
3066 	case 6:
3067 	{
3068 		U8 val = cdb[1] & 0xE0;
3069 		cdb[3] = (U8)(start_blk & 0xff);
3070 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3071 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3072 		break;
3073 	}
3074 	case 10:
3075 		cdb[5] = (U8)(start_blk & 0xff);
3076 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3077 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3078 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3079 		break;
3080 	case 12:
3081 		cdb[5]	  = (U8)(start_blk & 0xff);
3082 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3083 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3084 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3085 		break;
3086 
3087 	case 16:
3088 		cdb[9]	= (U8)(start_blk & 0xff);
3089 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3090 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3091 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3092 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3093 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3094 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3095 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3096 		break;
3097 	}
3098 
3099 	*cdb_len_ptr = cdb_len;
3100 }
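
/*
 * Worked example (illustrative): a READ(6) at start_blk 0x300000
 * (too large for the 21-bit LBA of a 6-byte CDB) with num_blocks 8 is
 * rewritten by the routine above into a READ(10):
 *
 *	cdb[0]    = READ_10
 *	cdb[2..5] = 00 30 00 00		(LBA, big-endian)
 *	cdb[7..8] = 00 08		(transfer length)
 *	*cdb_len_ptr = 10
 */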
3101 
3102 
3103 static int
3104 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3105 {
3106 	MR_FW_RAID_MAP_ALL *ld_map;
3107 
3108 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3109 
3110 		ld_map = instance->ld_map[(instance->map_id & 1)];
3111 
3112 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3113 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3114 
3115 		if (MR_ValidateMapInfo(instance->ld_map[
3116 		    (instance->map_id & 1)], instance->load_balance_info)) {
3117 			con_log(CL_ANN,
3118 			    (CE_CONT, "MR_ValidateMapInfo success"));
3119 
3120 			instance->fast_path_io = 1;
3121 			con_log(CL_ANN,
3122 			    (CE_NOTE, "instance->fast_path_io %d",
3123 			    instance->fast_path_io));
3124 
3125 			return (DDI_SUCCESS);
3126 		}
3127 
3128 	}
3129 
3130 	instance->fast_path_io = 0;
3131 	cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3132 	con_log(CL_ANN, (CE_NOTE,
3133 	    "instance->fast_path_io %d", instance->fast_path_io));
3134 
3135 	return (DDI_FAILURE);
3136 }
3137 
3138 /*
3139  * Marks the HBA as bad. This is called either when an IO packet
3140  * times out even after 3 FW resets, or when the FW is found to be
3141  * faulted even after 3 consecutive resets.
3142  */
3143 
3144 void
3145 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3146 {
3147 	cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3148 
3149 	if (instance->deadadapter == 1)
3150 		return;
3151 
3152 	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3153 	    "Writing to doorbell with MFI_STOP_ADP "));
3154 	mutex_enter(&instance->ocr_flags_mtx);
3155 	instance->deadadapter = 1;
3156 	mutex_exit(&instance->ocr_flags_mtx);
3157 	instance->func_ptr->disable_intr(instance);
3158 	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3159 	/* Flush */
3160 	(void) RD_RESERVED0_REGISTER(instance);
3161 
3162 	(void) mrsas_print_pending_cmds(instance);
3163 	(void) mrsas_complete_pending_cmds(instance);
3164 }
3165 
3166 void
3167 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3168 {
3169 	int i;
3170 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3171 	instance->reply_read_index = 0;
3172 
3173 	/* initialize all reply descriptors to the unused pattern (~0) */
3174 	reply_desc = instance->reply_frame_pool;
3175 
3176 	for (i = 0; i < instance->reply_q_depth; i++) {
3177 		reply_desc->Words = (uint64_t)~0;
3178 		reply_desc++;
3179 	}
3180 }
3181 
3182 int
3183 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3184 {
3185 	uint32_t status = 0x00;
3186 	uint32_t retry = 0;
3187 	uint32_t cur_abs_reg_val;
3188 	uint32_t fw_state;
3189 	uint32_t abs_state;
3190 	uint32_t i;
3191 
3192 	con_log(CL_ANN, (CE_NOTE,
3193 	    "mrsas_tbolt_reset_ppc entered"));
3194 
3195 	if (instance->deadadapter == 1) {
3196 		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3197 		    "no more resets as HBA has been marked dead ");
3198 		return (DDI_FAILURE);
3199 	}
3200 
3201 	mutex_enter(&instance->ocr_flags_mtx);
3202 	instance->adapterresetinprogress = 1;
3203 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3204 	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3205 	mutex_exit(&instance->ocr_flags_mtx);
3206 
3207 	instance->func_ptr->disable_intr(instance);
3208 
3209 	/* Add a delay in order to complete the ioctl & IO cmds in flight */
3210 	for (i = 0; i < 3000; i++) {
3211 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3212 	}
3213 
3214 	instance->reply_read_index = 0;
3215 
3216 retry_reset:
3217 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3218 	    "Resetting TBOLT"));
3219 
3220 	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3221 	WR_TBOLT_IB_WRITE_SEQ(4, instance);
3222 	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3223 	WR_TBOLT_IB_WRITE_SEQ(2, instance);
3224 	WR_TBOLT_IB_WRITE_SEQ(7, instance);
3225 	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3226 	con_log(CL_ANN1, (CE_NOTE,
3227 	    "mrsas_tbolt_reset_ppc: magic number written "
3228 	    "to write sequence register"));
3229 	delay(100 * drv_usectohz(MILLISEC));
3230 	status = RD_TBOLT_HOST_DIAG(instance);
3231 	con_log(CL_ANN1, (CE_NOTE,
3232 	    "mrsas_tbolt_reset_ppc: host diag register read "
3233 	    "successfully"));
3234 
3235 	while (status & DIAG_TBOLT_RESET_ADAPTER) {
3236 		delay(100 * drv_usectohz(MILLISEC));
3237 		status = RD_TBOLT_HOST_DIAG(instance);
3238 		if (retry++ == 100) {
3239 			cmn_err(CE_WARN,
3240 			    "mrsas_tbolt_reset_ppc: "
3241 			    "reset adapter bit still set; "
3242 			    "giving up after %d retries", retry);
3243 			return (DDI_FAILURE);
3244 		}
3245 	}
3246 
3247 	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3248 	delay(100 * drv_usectohz(MILLISEC));
3249 
3250 	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3251 	    (uint8_t *)((uintptr_t)(instance)->regmap +
3252 	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3253 
3254 	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3255 		delay(100 * drv_usectohz(MILLISEC));
3256 		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3257 		    (uint8_t *)((uintptr_t)(instance)->regmap +
3258 		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3259 		if (retry++ == 100) {
3260 			/* Don't call kill adapter here; the adapter */
3261 			/* reset bit is cleared by the firmware. */
3262 			/* mrsas_tbolt_kill_adapter(instance); */
3263 			cmn_err(CE_WARN,
3264 			    "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3265 			    instance->instance, __func__);
3266 			return (DDI_FAILURE);
3267 		}
3268 	}
3269 
3270 	con_log(CL_ANN,
3271 	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3272 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3273 	    "Calling mfi_state_transition_to_ready"));
3274 
3275 	abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 	retry = 0;
3277 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3278 		delay(100 * drv_usectohz(MILLISEC));
3279 		abs_state = instance->func_ptr->read_fw_status_reg(instance);
3280 	}
3281 	if (abs_state <= MFI_STATE_FW_INIT) {
3282 		cmn_err(CE_WARN,
3283 		    "mrsas_tbolt_reset_ppc: FW state < MFI_STATE_FW_INIT, "
3284 		    "state = 0x%x; retrying reset.", abs_state);
3285 		goto retry_reset;
3286 	}
3287 
3288 	/* Mark HBA as bad if the FW is faulted after 3 consecutive resets */
3289 	if (mfi_state_transition_to_ready(instance) ||
3290 	    debug_tbolt_fw_faults_after_ocr_g == 1) {
3291 		cur_abs_reg_val =
3292 		    instance->func_ptr->read_fw_status_reg(instance);
3293 		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;
3294 
3295 		con_log(CL_ANN1, (CE_NOTE,
3296 		    "mrsas_tbolt_reset_ppc: before fake fault: FW not ready, "
3297 		    "FW state = 0x%x", fw_state));
3298 		if (debug_tbolt_fw_faults_after_ocr_g == 1)
3299 			fw_state = MFI_STATE_FAULT;
3300 
3301 		con_log(CL_ANN,
3302 		    (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
3303 		    "FW state = 0x%x", fw_state));
3304 
3305 		if (fw_state == MFI_STATE_FAULT) {
3306 			/* increment the count */
3307 			instance->fw_fault_count_after_ocr++;
3308 			if (instance->fw_fault_count_after_ocr
3309 			    < MAX_FW_RESET_COUNT) {
3310 				cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3311 				    "FW is in fault after OCR, count %d; "
3312 				    "retrying reset",
3313 				    instance->fw_fault_count_after_ocr);
3314 				goto retry_reset;
3315 
3316 			} else {
3317 				cmn_err(CE_WARN, "mrsas %d: %s: "
3318 				    "max reset count exceeded (>%d); "
3319 				    "marking HBA bad, killing adapter",
3320 				    instance->instance, __func__,
3321 				    MAX_FW_RESET_COUNT);
3322 
3323 				mrsas_tbolt_kill_adapter(instance);
3324 				return (DDI_FAILURE);
3325 			}
3326 		}
3327 	}
3328 
3329 	/* reset the counter as FW is up after OCR */
3330 	instance->fw_fault_count_after_ocr = 0;
3331 
3332 	mrsas_reset_reply_desc(instance);
3333 
3334 
3335 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3336 	    "Calling mrsas_issue_init_mpi2"));
3337 	abs_state = mrsas_issue_init_mpi2(instance);
3338 	if (abs_state == (uint32_t)DDI_FAILURE) {
3339 		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3340 		    "INIT failed; retrying reset");
3341 		goto retry_reset;
3342 	}
3343 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3344 	    "mrsas_issue_init_mpi2 Done"));
3345 
3346 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3347 	    "Calling mrsas_print_pending_cmd"));
3348 	(void) mrsas_print_pending_cmds(instance);
3349 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 	    "mrsas_print_pending_cmd done"));
3351 
3352 	instance->func_ptr->enable_intr(instance);
3353 	instance->fw_outstanding = 0;
3354 
3355 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3356 	    "Calling mrsas_issue_pending_cmds"));
3357 	(void) mrsas_issue_pending_cmds(instance);
3358 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3359 	    "issue_pending_cmds done."));
3360 
3361 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3362 	    "Calling aen registration"));
3363 
3364 	instance->aen_cmd->retry_count_for_ocr = 0;
3365 	instance->aen_cmd->drv_pkt_time = 0;
3366 
3367 	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3368 
3369 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3370 	mutex_enter(&instance->ocr_flags_mtx);
3371 	instance->adapterresetinprogress = 0;
3372 	mutex_exit(&instance->ocr_flags_mtx);
3373 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3374 	    "adpterresetinprogress flag unset"));
3375 
3376 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3377 	return (DDI_SUCCESS);
3378 
3379 }
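
/*
 * OCR sequence recap (informal): disable interrupts -> drain delay ->
 * write the diag-unlock magic (0xF, 4, 0xb, 2, 7, 0xd) -> set and poll
 * DIAG_TBOLT_RESET_ADAPTER -> wait for the FW to pass MFI_STATE_FW_INIT
 * -> transition the FW to ready -> reset the reply ring -> re-issue
 * IOC INIT -> replay pending commands and re-arm the AEN.
 */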
3380 
3381 
3382 /*
3383  * mrsas_tbolt_sync_map_info -	Register for LD map updates
3384  * @instance:				Adapter soft state
3385  *
3386  * Issues an internal DCMD (MR_DCMD_LD_MAP_GET_INFO with the pend flag
3387  * set) carrying the current per-LD sequence numbers, so that the FW
3388  * notifies the driver whenever the RAID map changes.
3389  */
3390 
3391 static int
3392 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3393 {
3394 	int			ret = 0, i;
3395 	struct mrsas_cmd	*cmd = NULL;
3396 	struct mrsas_dcmd_frame	*dcmd;
3397 	uint32_t size_sync_info, num_lds;
3398 	LD_TARGET_SYNC *ci = NULL;
3399 	MR_FW_RAID_MAP_ALL *map;
3400 	MR_LD_RAID  *raid;
3401 	LD_TARGET_SYNC *ld_sync;
3402 	uint32_t ci_h = 0;
3403 	uint32_t size_map_info;
3404 
3405 	cmd = get_raid_msg_pkt(instance);
3406 
3407 	if (cmd == NULL) {
3408 		cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3409 		    "mrsas_tbolt_sync_map_info(). ");
3410 		return (DDI_FAILURE);
3411 	}
3412 
3413 	/* Clear the frame buffer and assign back the context id */
3414 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3415 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3416 	    cmd->index);
3417 	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3418 
3419 
3420 	map = instance->ld_map[instance->map_id & 1];
3421 
3422 	num_lds = map->raidMap.ldCount;
3423 
3424 	dcmd = &cmd->frame->dcmd;
3425 
3426 	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3427 
3428 	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3429 	    size_sync_info, num_lds));
3430 
3431 	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3432 
3433 	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3434 	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3435 
3436 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3437 
3438 	ld_sync = (LD_TARGET_SYNC *)ci;
3439 
3440 	for (i = 0; i < num_lds; i++, ld_sync++) {
3441 		raid = MR_LdRaidGet(i, map);
3442 
3443 		con_log(CL_ANN1,
3444 		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3445 		    i, raid->seqNum, raid->flags.ldSyncRequired));
3446 
3447 		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3448 
3449 		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3450 		    i, ld_sync->ldTargetId));
3451 
3452 		ld_sync->seqNum = raid->seqNum;
3453 	}
3454 
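	/*
	 * Build the DCMD frame.  The transfer length covers the full FW
	 * RAID map (MR_FW_RAID_MAP plus one MR_LD_SPAN_MAP per additional
	 * LD).  mbox.b[0] carries the LD count; mbox.b[1] = 1 requests
	 * "pend" behavior, so firmware holds the command and completes it
	 * when the map changes.
	 */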
3456 	size_map_info = sizeof (MR_FW_RAID_MAP) +
3457 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3458 
3459 	dcmd->cmd = MFI_CMD_OP_DCMD;
3460 	dcmd->cmd_status = 0xFF;
3461 	dcmd->sge_count = 1;
3462 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3463 	dcmd->timeout = 0;
3464 	dcmd->pad_0 = 0;
3465 	dcmd->data_xfer_len = size_map_info;
3466 	ASSERT(num_lds <= 255);
3467 	dcmd->mbox.b[0] = (U8)num_lds;
3468 	dcmd->mbox.b[1] = 1; /* Pend */
3469 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3470 	dcmd->sgl.sge32[0].phys_addr = ci_h;
3471 	dcmd->sgl.sge32[0].length = size_map_info;
3472 
3474 	instance->map_update_cmd = cmd;
3475 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3476 
3477 	instance->func_ptr->issue_cmd(cmd, instance);
3478 
3479 	instance->unroll.syncCmd = 1;
3480 	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3481 
3482 	return (ret);
3483 }
3484 
3485 /*
3486  * abort_syncmap_cmd -	Abort the pending LD map sync command.
3487  */
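/*
 * Typical usage (a sketch: the sync command issued above is remembered
 * in instance->map_update_cmd, so a reset path can cancel it):
 *
 *	if (instance->map_update_cmd != NULL)
 *		(void) abort_syncmap_cmd(instance,
 *		    instance->map_update_cmd);
 */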
3488 int
3489 abort_syncmap_cmd(struct mrsas_instance *instance,
3490     struct mrsas_cmd *cmd_to_abort)
3491 {
3492 	int	ret = 0;
3493 
3494 	struct mrsas_cmd		*cmd;
3495 	struct mrsas_abort_frame	*abort_fr;
3496 
3497 	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_syncmap:%d", __LINE__));
3498 
3499 	cmd = get_raid_msg_mfi_pkt(instance);
3500 
3501 	if (!cmd) {
3502 		cmn_err(CE_WARN,
3503 	    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3504 		return (DDI_FAILURE);
3505 	}
3506 	/* Clear the frame buffer and assign back the context id */
3507 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3508 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3509 	    cmd->index);
3510 
3511 	abort_fr = &cmd->frame->abort;
3512 
3513 	/*
	 * Prepare and issue the abort frame: the victim command is
	 * identified both by its context (cmd index) and by the physical
	 * address of its MFI frame.
	 */
3514 	ddi_put8(cmd->frame_dma_obj.acc_handle,
3515 	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
3516 	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3517 	    MFI_CMD_STATUS_SYNC_MODE);
3518 	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3519 	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3520 	    cmd_to_abort->index);
3521 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3522 	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3523 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3524 	    &abort_fr->abort_mfi_phys_addr_hi, 0);
3525 
3526 	cmd->frame_count = 1;
3527 
3528 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3529 
3530 	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3531 		con_log(CL_ANN1, (CE_WARN,
3532 	    "abort_syncmap_cmd: issue_cmd_in_poll_mode failed"));
3533 		ret = -1;
3534 	} else {
3535 		ret = 0;
3536 	}
3537 
3538 	return_raid_msg_mfi_pkt(instance, cmd);
3539 
3540 	atomic_add_16(&instance->fw_outstanding, (-1));
3541 
3542 	return (ret);
3543 }
3544 
3545 
3546 #ifdef PDSUPPORT
3547 /*
3548  * Even though these functions were originally intended for 2208 only, it
3549  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3550  * these two functions would be either in mr_sas.c, or in their own new source
3551  * file.  Since this driver needs some cleanup anyway, keep this portion in
3552  * mind as well.
3553  */
3554 
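/*
 * mrsas_tbolt_config_pd -	Configure a physical disk target.
 *
 * If a child devinfo node already exists it is validated (a stale node
 * is scheduled for unconfig); otherwise the PD is queried via
 * MR_DCMD_PD_GET_INFO, probed with scsi_hba_probe(), and attached
 * through mrsas_config_scsi_device().
 */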
3555 int
3556 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3557     uint8_t lun, dev_info_t **ldip)
3558 {
3559 	struct scsi_device *sd;
3560 	dev_info_t *child;
3561 	int rval, dtype;
3562 	struct mrsas_tbolt_pd_info *pds = NULL;
3563 
3564 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3565 	    tgt, lun));
3566 
3567 	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3568 		if (ldip) {
3569 			*ldip = child;
3570 		}
3571 		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3572 			rval = mrsas_service_evt(instance, tgt, 1,
3573 			    MRSAS_EVT_UNCONFIG_TGT, NULL);
3574 			con_log(CL_ANN1, (CE_WARN,
3575 			    "mr_sas:DELETING STALE ENTRY  rval = %d "
3576 			    "tgt id = %d", rval, tgt));
3577 			return (NDI_FAILURE);
3578 		}
3579 		return (NDI_SUCCESS);
3580 	}
3581 
3582 	pds = (struct mrsas_tbolt_pd_info *)
3583 	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3584 	mrsas_tbolt_get_pd_info(instance, pds, tgt);
3585 	dtype = pds->scsiDevType;
3586 
3587 	/* Check for Disk */
3588 	if (dtype == DTYPE_DIRECT) {
3589 		if (LE_16(pds->fwState) != PD_SYSTEM) {
3591 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3592 			return (NDI_FAILURE);
3593 		}
3594 		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3595 		sd->sd_address.a_hba_tran = instance->tran;
3596 		sd->sd_address.a_target = (uint16_t)tgt;
3597 		sd->sd_address.a_lun = (uint8_t)lun;
3598 
3599 		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3600 			rval = mrsas_config_scsi_device(instance, sd, ldip);
3601 			con_log(CL_DLEVEL1, (CE_NOTE,
3602 			    "Phys. device found: tgt %d dtype %d: %s",
3603 			    tgt, dtype, sd->sd_inq->inq_vid));
3604 		} else {
3605 			rval = NDI_FAILURE;
3606 			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3607 			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3608 			    tgt, dtype, sd->sd_inq->inq_vid));
3609 		}
3610 
3611 		/* sd_unprobe is a no-op; free the inquiry buffer manually. */
3612 		if (sd->sd_inq) {
3613 			kmem_free(sd->sd_inq, SUN_INQSIZE);
3614 			sd->sd_inq = (struct scsi_inquiry *)NULL;
3615 		}
3616 		kmem_free(sd, sizeof (struct scsi_device));
3617 	} else {
3618 		con_log(CL_ANN1, (CE_NOTE,
3619 		    "Device not supported: tgt %d lun %d dtype %d",
3620 		    tgt, lun, dtype));
3621 		rval = NDI_FAILURE;
3622 	}
3623 
3624 	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3625 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3626 	    rval));
3627 	return (rval);
3628 }
3629 
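/*
 * mrsas_tbolt_get_pd_info -	Fetch firmware info for one physical disk.
 *
 * Issues MR_DCMD_PD_GET_INFO synchronously and copies the response
 * from a temporary DMA buffer into @pds.
 */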
3630 static void
3631 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3632     struct mrsas_tbolt_pd_info *pds, int tgt)
3633 {
3634 	struct mrsas_cmd	*cmd;
3635 	struct mrsas_dcmd_frame	*dcmd;
3636 	dma_obj_t		dcmd_dma_obj;
3637 
3638 	ASSERT(instance->tbolt || instance->skinny);
3639 
3640 	if (instance->tbolt)
3641 		cmd = get_raid_msg_pkt(instance);
3642 	else
3643 		cmd = mrsas_get_mfi_pkt(instance);
3644 
3645 	if (!cmd) {
3646 		con_log(CL_ANN1,
3647 		    (CE_WARN, "Failed to get a cmd for get pd info"));
3648 		return;
3649 	}
3650 
3651 	/* Clear the frame buffer and assign back the context id */
3652 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3653 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3654 	    cmd->index);
3655 
3657 	dcmd = &cmd->frame->dcmd;
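	/*
	 * Response buffer for the DCMD: a single-cookie, 32-bit
	 * addressable DMA object sized for one mrsas_tbolt_pd_info.
	 */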
3658 	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3659 	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3660 	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3661 	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3662 	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3663 	dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3664 
3665 	if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3666 	    DDI_STRUCTURE_LE_ACC) != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "Failed to alloc DMA obj for get pd info"));
		if (instance->tbolt)
			return_raid_msg_pkt(instance, cmd);
		else
			mrsas_return_mfi_pkt(instance, cmd);
		return;
	}
3667 	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3668 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3669 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3670 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3671 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3672 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3673 	    MFI_FRAME_DIR_READ);
3674 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3675 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3676 	    sizeof (struct mrsas_tbolt_pd_info));
3677 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3678 	    MR_DCMD_PD_GET_INFO);
3679 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3680 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3681 	    sizeof (struct mrsas_tbolt_pd_info));
3682 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3683 	    dcmd_dma_obj.dma_cookie[0].dmac_address);
3684 
3685 	cmd->sync_cmd = MRSAS_TRUE;
3686 	cmd->frame_count = 1;
3687 
3688 	if (instance->tbolt)
3689 		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3690 
3691 	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3692 
3693 	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3694 	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3695 	    DDI_DEV_AUTOINCR);
3696 	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3697 
3698 	if (instance->tbolt)
3699 		return_raid_msg_pkt(instance, cmd);
3700 	else
3701 		mrsas_return_mfi_pkt(instance, cmd);
3702 }
3703 #endif	/* PDSUPPORT */
3704