xref: /illumos-gate/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c (revision c3d26abc9ee97b4f60233556aadeb57e0bd30bb9)
1 /*
2  * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3  * controllers, i.e. Thunderbolt and Invader.
4  *
5  * Solaris MegaRAID device driver for SAS2.0 controllers
6  * Copyright (c) 2008-2012, LSI Logic Corporation.
7  * All rights reserved.
8  *
9  * Version:
10  * Author:
11  *		Swaminathan K S
12  *		Arun Chandrashekhar
13  *		Manju R
14  *		Rasheed
15  *		Shakeel Bukhari
16  */
17 
18 /*
19  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
20  * Copyright 2015 Citrus IT Limited. All rights reserved.
21  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
22  */
23 
24 
25 #include <sys/types.h>
26 #include <sys/file.h>
27 #include <sys/atomic.h>
28 #include <sys/scsi/scsi.h>
29 #include <sys/byteorder.h>
30 #include <sys/sdt.h>
31 #include "ld_pd_map.h"
32 #include "mr_sas.h"
33 #include "fusion.h"
34 
35 /*
36  * FMA header files
37  */
38 #include <sys/ddifm.h>
39 #include <sys/fm/protocol.h>
40 #include <sys/fm/util.h>
41 #include <sys/fm/io/ddi.h>
42 
43 
44 /* Pre-TB command size and TB command size. */
45 #define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
46 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
47 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
48 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
49 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
50 extern ddi_dma_attr_t mrsas_generic_dma_attr;
51 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
52 extern struct ddi_device_acc_attr endian_attr;
53 extern int	debug_level_g;
54 extern unsigned int	enable_fp;
55 volatile int dump_io_wait_time = 90;
56 extern volatile int  debug_timeout_g;
57 extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
58 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
59 extern void	push_pending_mfi_pkt(struct mrsas_instance *,
60 			struct mrsas_cmd *);
61 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
62 	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
63 
64 /* Local static prototypes. */
65 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
66     struct scsi_address *, struct scsi_pkt *, uchar_t *);
67 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
68     U64 start_blk, U32 num_blocks);
69 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
70 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
71 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
72 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
73 #ifdef PDSUPPORT
74 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
75     struct mrsas_tbolt_pd_info *, int);
76 #endif /* PDSUPPORT */
77 
78 static int debug_tbolt_fw_faults_after_ocr_g = 0;
79 
80 /*
81  * destroy_mfi_mpi_frame_pool
82  */
83 void
84 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
85 {
86 	int	i;
87 
88 	struct mrsas_cmd	*cmd;
89 
90 	/* return all mfi frames to pool */
91 	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
92 		cmd = instance->cmd_list[i];
93 		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
94 			(void) mrsas_free_dma_obj(instance,
95 			    cmd->frame_dma_obj);
96 		}
97 		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
98 	}
99 }
100 
101 /*
102  * destroy_mpi2_frame_pool
103  */
104 void
105 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
106 {
107 
108 	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
109 		(void) mrsas_free_dma_obj(instance,
110 		    instance->mpi2_frame_pool_dma_obj);
111 		instance->mpi2_frame_pool_dma_obj.status = DMA_OBJ_FREED;
112 	}
113 }
114 
115 
116 /*
117  * mrsas_tbolt_free_additional_dma_buffer
118  */
119 void
120 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
121 {
122 	int i;
123 
124 	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
125 		(void) mrsas_free_dma_obj(instance,
126 		    instance->mfi_internal_dma_obj);
127 		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
128 	}
129 	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
130 		(void) mrsas_free_dma_obj(instance,
131 		    instance->mfi_evt_detail_obj);
132 		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
133 	}
134 
135 	for (i = 0; i < 2; i++) {
136 		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
137 			(void) mrsas_free_dma_obj(instance,
138 			    instance->ld_map_obj[i]);
139 			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
140 		}
141 	}
142 }
143 
144 
145 /*
146  * free_req_rep_desc_pool
147  */
148 void
149 free_req_rep_desc_pool(struct mrsas_instance *instance)
150 {
151 	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
152 		(void) mrsas_free_dma_obj(instance,
153 		    instance->request_desc_dma_obj);
154 		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
155 	}
156 
157 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
158 		(void) mrsas_free_dma_obj(instance,
159 		    instance->reply_desc_dma_obj);
160 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
161 	}
162 
163 
164 }
165 
166 
167 /*
168  * ThunderBolt(TB) Request Message Frame Pool
169  */
170 int
171 create_mpi2_frame_pool(struct mrsas_instance *instance)
172 {
173 	int		i = 0;
174 	uint16_t	max_cmd;
175 	uint32_t	sgl_sz;
176 	uint32_t	raid_msg_size;
177 	uint32_t	total_size;
178 	uint32_t	offset;
179 	uint32_t	io_req_base_phys;
180 	uint8_t		*io_req_base;
181 	struct mrsas_cmd	*cmd;
182 
183 	max_cmd = instance->max_fw_cmds;
184 
185 	sgl_sz		= 1024;
186 	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;
187 
188 	/* Allocate an additional 256 bytes to accommodate SMID 0. */
189 	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
190 	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
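	/*
	 * Resulting carve-up of the single allocation (a sketch; the
	 * per-command pointers into this pool are assigned in the loop
	 * below):
	 *
	 *   [256B pad (SMID 0)][max_cmd msg frames]
	 *   [max_cmd chained-SGL buffers][max_cmd sense buffers]
	 */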
191 
192 	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
193 	    "max_cmd %x", max_cmd));
194 
195 	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
196 	    "request message frame pool size %x", total_size));
197 
198 	/*
199 	 * ThunderBolt(TB): we create a single chunk of DMA'ble memory and
200 	 * carve it up into per-command frames.  Each command must be able
201 	 * to hold a RAID MESSAGE FRAME, which in turn embeds an MFI_FRAME.
202 	 * See alloc_req_rep_desc(), where the request/reply descriptor
203 	 * queues are allocated, for the corresponding layout.
204 	 */
205 
206 	instance->mpi2_frame_pool_dma_obj.size = total_size;
207 	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
208 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
209 	    0xFFFFFFFFU;
210 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
211 	    0xFFFFFFFFU;
212 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
213 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
214 
215 	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
216 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
217 		dev_err(instance->dip, CE_WARN,
218 		    "could not alloc mpi2 frame pool");
219 		return (DDI_FAILURE);
220 	}
221 
222 	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
223 	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
224 
225 	instance->io_request_frames =
226 	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
227 	instance->io_request_frames_phy =
228 	    (uint32_t)
229 	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
230 
231 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
232 	    (void *)instance->io_request_frames));
233 
234 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
235 	    instance->io_request_frames_phy));
236 
237 	io_req_base = (uint8_t *)instance->io_request_frames +
238 	    MRSAS_THUNDERBOLT_MSG_SIZE;
239 	io_req_base_phys = instance->io_request_frames_phy +
240 	    MRSAS_THUNDERBOLT_MSG_SIZE;
241 
242 	con_log(CL_DLEVEL3, (CE_NOTE,
243 	    "io req_base_phys 0x%x", io_req_base_phys));
244 
245 	for (i = 0; i < max_cmd; i++) {
246 		cmd = instance->cmd_list[i];
247 
248 		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
249 
250 		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
251 		    ((uint8_t *)io_req_base + offset);
252 		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
253 
254 		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
255 		    (max_cmd * raid_msg_size) + i * sgl_sz);
256 
257 		cmd->sgl_phys_addr = (io_req_base_phys +
258 		    (max_cmd * raid_msg_size) + i * sgl_sz);
259 
260 		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
261 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
262 		    (i * SENSE_LENGTH));
263 
264 		cmd->sense_phys_addr1 = (io_req_base_phys +
265 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
266 		    (i * SENSE_LENGTH));
267 
268 
269 		cmd->SMID = i + 1;
270 
271 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
272 		    cmd->index, (void *)cmd->scsi_io_request));
273 
274 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
275 		    cmd->index, cmd->scsi_io_request_phys_addr));
276 
277 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
278 		    cmd->index, (void *)cmd->sense1));
279 
280 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
281 		    cmd->index, cmd->sense_phys_addr1));
282 
283 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
284 		    cmd->index, (void *)cmd->sgl));
285 
286 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
287 		    cmd->index, cmd->sgl_phys_addr));
288 	}
289 
290 	return (DDI_SUCCESS);
291 
292 }
293 
294 
295 /*
296  * mrsas_tbolt_alloc_additional_dma_buffer - AEN and RAID map buffers
297  */
298 int
299 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
300 {
301 	uint32_t	internal_buf_size = PAGESIZE*2;
302 	int i;
303 
304 	/* Initialize buffer status as free */
305 	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
306 	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
307 	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
308 	instance->ld_map_obj[1].status = DMA_OBJ_FREED;
309 
310 
311 	instance->mfi_internal_dma_obj.size = internal_buf_size;
312 	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
313 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
314 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
315 	    0xFFFFFFFFU;
316 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
317 
318 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
319 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
320 		dev_err(instance->dip, CE_WARN,
321 		    "could not alloc reply queue");
322 		return (DDI_FAILURE);
323 	}
324 
325 	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
326 
327 	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
328 	instance->internal_buf =
329 	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
330 	instance->internal_buf_dmac_add =
331 	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
332 	instance->internal_buf_size = internal_buf_size;
333 
334 	/* allocate evt_detail */
335 	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
336 	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
337 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
338 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
339 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
340 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
341 
342 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
343 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
344 		dev_err(instance->dip, CE_WARN,
345 		    "mrsas_tbolt_alloc_additional_dma_buffer: "
346 		    "could not allocate data transfer buffer.");
347 		goto fail_tbolt_additional_buff;
348 	}
349 
350 	bzero(instance->mfi_evt_detail_obj.buffer,
351 	    sizeof (struct mrsas_evt_detail));
352 
353 	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
354 
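	/*
	 * Size of the firmware RAID map: MR_FW_RAID_MAP already embeds the
	 * first MR_LD_SPAN_MAP, hence the (MAX_LOGICAL_DRIVES - 1)
	 * additional span maps here.
	 */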
355 	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
356 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
357 
358 	for (i = 0; i < 2; i++) {
359 		/* allocate the data transfer buffer */
360 		instance->ld_map_obj[i].size = instance->size_map_info;
361 		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
362 		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
363 		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
364 		    0xFFFFFFFFU;
365 		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
366 		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
367 
368 		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
369 		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
370 			dev_err(instance->dip, CE_WARN,
371 			    "could not allocate data transfer buffer.");
372 			goto fail_tbolt_additional_buff;
373 		}
374 
375 		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
376 
377 		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
378 
379 		instance->ld_map[i] =
380 		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
381 		instance->ld_map_phy[i] = (uint32_t)instance->
382 		    ld_map_obj[i].dma_cookie[0].dmac_address;
383 
384 		con_log(CL_DLEVEL3, (CE_NOTE,
385 		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
386 
387 		con_log(CL_DLEVEL3, (CE_NOTE,
388 		    "size_map_info 0x%x", instance->size_map_info));
389 	}
390 
391 	return (DDI_SUCCESS);
392 
393 fail_tbolt_additional_buff:
394 	mrsas_tbolt_free_additional_dma_buffer(instance);
395 
396 	return (DDI_FAILURE);
397 }
398 
399 MRSAS_REQUEST_DESCRIPTOR_UNION *
400 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
401 {
402 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
403 
404 	if (index > instance->max_fw_cmds) {
405 		con_log(CL_ANN1, (CE_NOTE,
406 		    "Invalid SMID 0x%x request for descriptor", index));
407 		con_log(CL_ANN1, (CE_NOTE,
408 		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
409 		return (NULL);
410 	}
411 
412 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
413 	    ((char *)instance->request_message_pool +
414 	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
415 
416 	con_log(CL_ANN1, (CE_NOTE,
417 	    "request descriptor : 0x%08lx", (unsigned long)req_desc));
418 
419 	con_log(CL_ANN1, (CE_NOTE,
420 	    "request descriptor base phy : 0x%08lx",
421 	    (unsigned long)instance->request_message_pool_phy));
422 
423 	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
424 }
425 
426 
427 /*
428  * Allocate Request and Reply Queue Descriptors.
429  */
430 int
431 alloc_req_rep_desc(struct mrsas_instance *instance)
432 {
433 	uint32_t	request_q_sz, reply_q_sz;
434 	int		i, max_reply_q_sz;
435 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
436 
437 	/*
438 	 * ThunderBolt(TB): there is no longer a producer/consumer mechanism.
439 	 * Once we take an interrupt we are supposed to scan through the
440 	 * list of reply descriptors and process them accordingly.  We need
441 	 * to allocate memory for up to 1024 reply descriptors.
442 	 */
443 
444 	/* Allocate Reply Descriptors */
445 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
446 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
447 
448 	/* reply queue size should be multiple of 16 */
449 	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
450 
451 	reply_q_sz = 8 * max_reply_q_sz;
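	/*
	 * Worked example: with max_fw_cmds capped at 1007 (see
	 * mrsas_init_adapter_tbolt()), ((1007 + 1 + 15) / 16) * 16 = 1008
	 * reply queue entries, so reply_q_sz = 8 * 1008 bytes.
	 */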
456 
457 	instance->reply_desc_dma_obj.size = reply_q_sz;
458 	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
459 	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
460 	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
461 	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
462 	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
463 
464 	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
465 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
466 		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
467 		return (DDI_FAILURE);
468 	}
469 
470 	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
471 	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
472 
473 	/* virtual address of reply queue */
474 	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
475 	    instance->reply_desc_dma_obj.buffer);
476 
477 	instance->reply_q_depth = max_reply_q_sz;
478 
479 	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
480 	    instance->reply_q_depth));
481 
482 	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
483 	    (void *)instance->reply_frame_pool));
484 
485 	/* initializing reply address to 0xFFFFFFFF */
486 	reply_desc = instance->reply_frame_pool;
487 
488 	for (i = 0; i < instance->reply_q_depth; i++) {
489 		reply_desc->Words = (uint64_t)~0;
490 		reply_desc++;
491 	}
492 
493 
494 	instance->reply_frame_pool_phy =
495 	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
496 
497 	con_log(CL_ANN1, (CE_NOTE,
498 	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
499 
500 
501 	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
502 	    reply_q_sz);
503 
504 	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
505 	    instance->reply_pool_limit_phy));
506 
507 
511 	/* Allocate Request Descriptors */
512 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
513 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
514 
515 	request_q_sz = 8 * instance->max_fw_cmds;
517 
518 	instance->request_desc_dma_obj.size = request_q_sz;
519 	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
520 	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
521 	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
522 	    0xFFFFFFFFU;
523 	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
524 	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
525 
526 	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
527 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
528 		dev_err(instance->dip, CE_WARN,
529 		    "could not alloc request queue desc");
530 		goto fail_undo_reply_queue;
531 	}
532 
533 	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
534 	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
535 
536 	/* virtual address of request queue desc */
537 	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
538 	    (instance->request_desc_dma_obj.buffer);
539 
540 	instance->request_message_pool_phy =
541 	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
542 
543 	return (DDI_SUCCESS);
544 
545 fail_undo_reply_queue:
546 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
547 		(void) mrsas_free_dma_obj(instance,
548 		    instance->reply_desc_dma_obj);
549 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
550 	}
551 
552 	return (DDI_FAILURE);
553 }
554 
555 /*
556  * mrsas_alloc_cmd_pool_tbolt
557  *
558  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
559  * routine
560  */
561 int
562 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
563 {
564 	int		i;
565 	int		count;
566 	uint32_t	max_cmd;
567 	uint32_t	reserve_cmd;
568 	size_t		sz;
569 
570 	struct mrsas_cmd	*cmd;
571 
572 	max_cmd = instance->max_fw_cmds;
573 	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool_tbolt: "
574 	    "max_cmd %x", max_cmd));
575 
576 
577 	sz = sizeof (struct mrsas_cmd *) * max_cmd;
578 
579 	/*
580 	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
581 	 * Allocate the dynamic array first and then allocate individual
582 	 * commands.
583 	 */
584 	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
585 
586 	/* create a frame pool and assign one frame to each cmd */
587 	for (count = 0; count < max_cmd; count++) {
588 		instance->cmd_list[count] =
589 		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
590 	}
591 
592 	/* add all the commands to command pool */
593 
594 	INIT_LIST_HEAD(&instance->cmd_pool_list);
595 	INIT_LIST_HEAD(&instance->cmd_pend_list);
596 	INIT_LIST_HEAD(&instance->cmd_app_pool_list);
597 
598 	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
599 
600 	/* cmd index 0 reserved for IOC INIT */
601 	for (i = 1; i < reserve_cmd; i++) {
602 		cmd		= instance->cmd_list[i];
603 		cmd->index	= i;
604 		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
605 	}
606 
607 
608 	for (i = reserve_cmd; i < max_cmd; i++) {
609 		cmd		= instance->cmd_list[i];
610 		cmd->index	= i;
611 		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
612 	}
613 
614 	return (DDI_SUCCESS);
615 
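	/*
	 * Note: the allocations above use KM_SLEEP and so cannot fail;
	 * the mrsas_undo_cmds/mrsas_undo_cmd_list labels below are
	 * currently unreachable unwind paths.
	 */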
616 mrsas_undo_cmds:
617 	if (count > 0) {
618 		/* free each cmd */
619 		for (i = 0; i < count; i++) {
620 			if (instance->cmd_list[i] != NULL) {
621 				kmem_free(instance->cmd_list[i],
622 				    sizeof (struct mrsas_cmd));
623 			}
624 			instance->cmd_list[i] = NULL;
625 		}
626 	}
627 
628 mrsas_undo_cmd_list:
629 	if (instance->cmd_list != NULL)
630 		kmem_free(instance->cmd_list, sz);
631 	instance->cmd_list = NULL;
632 
633 	return (DDI_FAILURE);
634 }
635 
636 
637 /*
638  * free_space_for_mpi2
639  */
640 void
641 free_space_for_mpi2(struct mrsas_instance *instance)
642 {
643 	/* already freed */
644 	if (instance->cmd_list == NULL) {
645 		return;
646 	}
647 
648 	/* First free the additional DMA buffer */
649 	mrsas_tbolt_free_additional_dma_buffer(instance);
650 
651 	/* Free the request/reply descriptor pool */
652 	free_req_rep_desc_pool(instance);
653 
654 	/*  Free the MPI message pool */
655 	destroy_mpi2_frame_pool(instance);
656 
657 	/* Free the MFI frame pool */
658 	destroy_mfi_frame_pool(instance);
659 
660 	/* Free all the commands in the cmd_list */
661 	/* Free the cmd_list buffer itself */
662 	mrsas_free_cmd_pool(instance);
663 }
664 
665 
666 /*
667  * ThunderBolt(TB) memory allocations for commands/messages/frames.
668  */
669 int
670 alloc_space_for_mpi2(struct mrsas_instance *instance)
671 {
672 	/* Allocate command pool (memory for cmd_list & individual commands) */
673 	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
674 		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
675 		return (DDI_FAILURE);
676 	}
677 
678 	/* Initialize single reply size and Message size */
679 	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
680 	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
681 
682 	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
683 	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
684 	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
685 	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
686 	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
687 
688 	/* Reduce SG count by 2 to take care of group cmds feature in FW */
689 	instance->max_num_sge = (instance->max_sge_in_main_msg +
690 	    instance->max_sge_in_chain - 2);
691 	instance->chain_offset_mpt_msg =
692 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
693 	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
694 	    sizeof (MPI2_SGE_IO_UNION)) / 16;
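	/*
	 * Sketch of the chain-offset arithmetic (assuming a 256-byte
	 * Thunderbolt message frame and a 16-byte MPI2_SGE_IO_UNION):
	 * chain_offset_io_req = (256 - 16) / 16 = 15, i.e. the chain
	 * element occupies the last 16-byte slot of the frame.
	 */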
695 	instance->reply_read_index = 0;
696 
697 
698 	/* Allocate Request and Reply descriptors Array */
699 	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
700 	if (alloc_req_rep_desc(instance)) {
701 		dev_err(instance->dip, CE_WARN,
702 		    "Error, allocating memory for descripter-pool");
703 		goto mpi2_undo_cmd_pool;
704 	}
705 	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
706 	    instance->request_message_pool_phy));
707 
708 
709 	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
710 	if (create_mfi_frame_pool(instance)) {
711 		dev_err(instance->dip, CE_WARN,
712 		    "Error, allocating memory for MFI frame-pool");
713 		goto mpi2_undo_descriptor_pool;
714 	}
715 
716 
717 	/* Allocate MPI2 Message pool */
718 	/*
719 	 * Make sure the buffer is aligned to 256 for the raid message packet;
720 	 * create an I/O request pool and assign one frame to each cmd.
721 	 */
722 
723 	if (create_mpi2_frame_pool(instance)) {
724 		dev_err(instance->dip, CE_WARN,
725 		    "Error, allocating memory for MPI2 Message-pool");
726 		goto mpi2_undo_mfi_frame_pool;
727 	}
728 
729 #ifdef DEBUG
730 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
731 	    instance->max_sge_in_main_msg));
732 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
733 	    instance->max_sge_in_chain));
734 	con_log(CL_ANN1, (CE_CONT,
735 	    "[max_sge]0x%x", instance->max_num_sge));
736 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
737 	    instance->chain_offset_mpt_msg));
738 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
739 	    instance->chain_offset_io_req));
740 #endif
741 
742 
743 	/* Allocate additional dma buffer */
744 	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
745 		dev_err(instance->dip, CE_WARN,
746 		    "Error, allocating tbolt additional DMA buffer");
747 		goto mpi2_undo_message_pool;
748 	}
749 
750 	return (DDI_SUCCESS);
751 
752 mpi2_undo_message_pool:
753 	destroy_mpi2_frame_pool(instance);
754 
755 mpi2_undo_mfi_frame_pool:
756 	destroy_mfi_frame_pool(instance);
757 
758 mpi2_undo_descriptor_pool:
759 	free_req_rep_desc_pool(instance);
760 
761 mpi2_undo_cmd_pool:
762 	mrsas_free_cmd_pool(instance);
763 
764 	return (DDI_FAILURE);
765 }
766 
767 
768 /*
769  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
770  */
771 int
772 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
773 {
774 
775 	/*
776 	 * Reduce the max supported cmds by 1. This is to ensure that the
777 	 * reply_q_sz (1 more than the max cmd that driver may send)
778 	 * does not exceed max cmds that the FW can support
779 	 */
780 
781 	if (instance->max_fw_cmds > 1008) {
782 		instance->max_fw_cmds = 1008 - 1;
783 	}
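	/*
	 * 1008 appears to leave a 16-command cushion below 1024 so that
	 * the reply queue depth, rounded up to a multiple of 16 in
	 * alloc_req_rep_desc(), stays within what the FW can support.
	 */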
785 
786 	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
787 	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
788 
789 
790 	/* create a pool of commands */
791 	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
792 		dev_err(instance->dip, CE_WARN,
793 		    "alloc_space_for_mpi2() failed.");
794 
795 		return (DDI_FAILURE);
796 	}
797 
798 	/* Send ioc init message */
799 	/* NOTE: the issue_init call does FMA checking already. */
800 	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
801 		dev_err(instance->dip, CE_WARN,
802 		    "mrsas_issue_init_mpi2() failed.");
803 
804 		goto fail_init_fusion;
805 	}
806 
807 	instance->unroll.alloc_space_mpi2 = 1;
808 
809 	con_log(CL_ANN, (CE_NOTE,
810 	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));
811 
812 	return (DDI_SUCCESS);
813 
814 fail_init_fusion:
815 	free_space_for_mpi2(instance);
816 
817 	return (DDI_FAILURE);
818 }
819 
820 
821 
822 /*
823  * init_mpi2
824  */
825 int
826 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
827 {
828 	dma_obj_t init2_dma_obj;
829 	int ret_val = DDI_SUCCESS;
830 
831 	/* allocate DMA buffer for IOC INIT message */
832 	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
833 	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
834 	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
835 	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
836 	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
837 	init2_dma_obj.dma_attr.dma_attr_align = 256;
838 
839 	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
840 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
841 		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
842 		    "could not allocate data transfer buffer.");
843 		return (DDI_FAILURE);
844 	}
845 	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
846 
847 	con_log(CL_ANN1, (CE_NOTE,
848 	    "mrsas_issue_init_mpi2 _phys adr: %x",
849 	    init2_dma_obj.dma_cookie[0].dmac_address));
850 
851 
852 	/* Initialize and send ioc init message */
853 	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
854 	if (ret_val == DDI_FAILURE) {
855 		con_log(CL_ANN1, (CE_WARN,
856 		    "mrsas_issue_init_mpi2: Failed"));
857 		goto fail_init_mpi2;
858 	}
859 
860 	/* free IOC init DMA buffer */
861 	if (mrsas_free_dma_obj(instance, init2_dma_obj)
862 	    != DDI_SUCCESS) {
863 		con_log(CL_ANN1, (CE_WARN,
864 		    "mrsas_issue_init_mpi2: Free Failed"));
865 		return (DDI_FAILURE);
866 	}
867 
868 	/* Get/Check and sync ld_map info */
869 	instance->map_id = 0;
870 	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
871 		(void) mrsas_tbolt_sync_map_info(instance);
872 
873 
874 	/* No mrsas_cmd to send, so send NULL. */
875 	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
876 		goto fail_init_mpi2;
877 
878 	con_log(CL_ANN, (CE_NOTE,
879 	    "mrsas_issue_init_mpi2: SUCCESSFUL"));
880 
881 	return (DDI_SUCCESS);
882 
883 fail_init_mpi2:
884 	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
885 
886 	return (DDI_FAILURE);
887 }
888 
889 static int
890 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
891 {
892 	int				numbytes;
893 	uint16_t			flags;
894 	struct mrsas_init_frame2	*mfiFrameInit2;
895 	struct mrsas_header		*frame_hdr;
896 	Mpi2IOCInitRequest_t		*init;
897 	struct mrsas_cmd		*cmd = NULL;
898 	struct mrsas_drv_ver		drv_ver_info;
899 	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
900 	uint32_t			timeout;
901 
902 	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
903 
904 
905 #ifdef DEBUG
906 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
907 	    (int)sizeof (*mfiFrameInit2)));
908 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
909 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
910 	    (int)sizeof (struct mrsas_init_frame2)));
911 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
912 	    (int)sizeof (Mpi2IOCInitRequest_t)));
913 #endif
914 
915 	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
916 	numbytes = sizeof (*init);
917 	bzero(init, numbytes);
918 
919 	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
920 	    MPI2_FUNCTION_IOC_INIT);
921 
922 	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
923 	    MPI2_WHOINIT_HOST_DRIVER);
924 
925 	/* set MsgVersion and HeaderVersion host driver was built with */
926 	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
927 	    MPI2_VERSION);
928 
929 	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
930 	    MPI2_HEADER_VERSION);
931 
932 	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
933 	    instance->raid_io_msg_size / 4);
934 
935 	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
936 	    0);
937 
938 	ddi_put16(mpi2_dma_obj->acc_handle,
939 	    &init->ReplyDescriptorPostQueueDepth,
940 	    instance->reply_q_depth);
941 	/*
942 	 * These addresses are set using the DMA cookie addresses from when the
943 	 * memory was allocated.  Sense buffer hi address should be 0.
944 	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
945 	 */
946 
947 	ddi_put32(mpi2_dma_obj->acc_handle,
948 	    &init->SenseBufferAddressHigh, 0);
949 
950 	ddi_put64(mpi2_dma_obj->acc_handle,
951 	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
952 	    instance->io_request_frames_phy);
953 
954 	ddi_put64(mpi2_dma_obj->acc_handle,
955 	    &init->ReplyDescriptorPostQueueAddress,
956 	    instance->reply_frame_pool_phy);
957 
958 	ddi_put64(mpi2_dma_obj->acc_handle,
959 	    &init->ReplyFreeQueueAddress, 0);
960 
961 	cmd = instance->cmd_list[0];
962 	if (cmd == NULL) {
963 		return (DDI_FAILURE);
964 	}
965 	cmd->retry_count_for_ocr = 0;
966 	cmd->pkt = NULL;
967 	cmd->drv_pkt_time = 0;
968 
969 	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
970 	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
971 
972 	frame_hdr = &cmd->frame->hdr;
973 
974 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
975 	    MFI_CMD_STATUS_POLL_MODE);
976 
977 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
978 
979 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
980 
981 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
982 
983 	con_log(CL_ANN, (CE_CONT,
984 	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
985 
986 	/* Init the MFI Header */
987 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
988 	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
989 
990 	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
991 
992 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
993 	    &mfiFrameInit2->cmd_status,
994 	    MFI_STAT_INVALID_STATUS);
995 
996 	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
997 
998 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
1000 	    mpi2_dma_obj->dma_cookie[0].dmac_address);
1001 
1002 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1003 	    &mfiFrameInit2->data_xfer_len,
1004 	    sizeof (Mpi2IOCInitRequest_t));
1005 
1006 	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1007 	    (int)init->ReplyDescriptorPostQueueAddress));
1008 
1009 	/* fill driver version information */
1010 	fill_up_drv_ver(&drv_ver_info);
1011 
1012 	/* allocate the driver version data transfer buffer */
1013 	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1014 	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1015 	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1016 	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1017 	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1018 	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1019 
1020 	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1021 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1022 		dev_err(instance->dip, CE_WARN,
1023 		    "fusion init: Could not allocate driver version buffer.");
1024 		return (DDI_FAILURE);
1025 	}
1026 	/* copy driver version to dma buffer */
1027 	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1028 	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1029 	    (uint8_t *)drv_ver_info.drv_ver,
1030 	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
1031 	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1032 
1033 	/* send driver version physical address to firmware */
1034 	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1035 	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1036 
1037 	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1038 	    mfiFrameInit2->queue_info_new_phys_addr_lo,
1039 	    (int)sizeof (Mpi2IOCInitRequest_t)));
1040 
1041 	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1042 
1043 	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1044 	    cmd->scsi_io_request_phys_addr,
1045 	    (int)sizeof (struct mrsas_init_frame2)));
1046 
1047 	/* disable interrupts before sending INIT2 frame */
1048 	instance->func_ptr->disable_intr(instance);
1049 
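	/*
	 * IOC INIT is an MFI frame tunnelled through the MPT interface:
	 * the request descriptor carries the physical address of the MFI
	 * init frame, typed as an MFA descriptor.
	 */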
1050 	req_desc.Words = cmd->scsi_io_request_phys_addr;
1051 	req_desc.MFAIo.RequestFlags =
1052 	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1053 
1054 	cmd->request_desc = &req_desc;
1055 
1056 	/* issue the init frame */
1057 
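	/*
	 * Post the 64-bit descriptor as two 32-bit queue-port writes;
	 * reg_write_mtx keeps the low/high halves of concurrent posts
	 * from interleaving.
	 */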
1058 	mutex_enter(&instance->reg_write_mtx);
1059 	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
1060 	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
1061 	mutex_exit(&instance->reg_write_mtx);
1062 
1063 	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1064 	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1065 	    frame_hdr->cmd_status));
1066 
1067 	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
1068 	do {
1069 		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
1070 		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
1071 			break;
1072 		delay(1);
1073 		timeout--;
1074 	} while (timeout > 0);
1075 
1076 	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1077 	    &mfiFrameInit2->cmd_status) == 0) {
1078 		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1079 	} else {
1080 		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1081 		mrsas_dump_reply_desc(instance);
1082 		goto fail_ioc_init;
1083 	}
1084 
1085 	mrsas_dump_reply_desc(instance);
1086 
1087 	instance->unroll.verBuff = 1;
1088 
1089 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1090 
1091 	return (DDI_SUCCESS);
1092 
1093 
1094 fail_ioc_init:
1095 
1096 	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1097 
1098 	return (DDI_FAILURE);
1099 }
1100 
1101 int
1102 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1103 {
1104 	int i;
1105 	uint32_t wait_time = dump_io_wait_time;
1106 	for (i = 0; i < wait_time; i++) {
1107 		/*
1108 		 * Check For Outstanding poll Commands
1109 		 * except ldsync command and aen command
1110 		 */
1111 		if (instance->fw_outstanding <= 2) {
1112 			break;
1113 		}
1114 		drv_usecwait(10*MILLISEC);
1115 		/* complete commands from reply queue */
1116 		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
1117 	}
1118 	if (instance->fw_outstanding > 2) {
1119 		return (1);
1120 	}
1121 	return (0);
1122 }
1123 /*
1124  * scsi_pkt handling
1125  *
1126  * Visible to the external world via the transport structure.
1127  */
1128 
1129 int
1130 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1131 {
1132 	struct mrsas_instance	*instance = ADDR2MR(ap);
1133 	struct scsa_cmd		*acmd = PKT2CMD(pkt);
1134 	struct mrsas_cmd	*cmd = NULL;
1135 	uchar_t			cmd_done = 0;
1136 
1137 	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1138 	if (instance->deadadapter == 1) {
1139 		dev_err(instance->dip, CE_WARN,
1140 		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1141 		    "for IO, as the HBA doesnt take any more IOs");
1142 		if (pkt) {
1143 			pkt->pkt_reason		= CMD_DEV_GONE;
1144 			pkt->pkt_statistics	= STAT_DISCON;
1145 		}
1146 		return (TRAN_FATAL_ERROR);
1147 	}
1148 	if (instance->adapterresetinprogress) {
1149 		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1150 		    "returning mfi_pkt and setting TRAN_BUSY\n"));
1151 		return (TRAN_BUSY);
1152 	}
1153 	(void) mrsas_tbolt_prepare_pkt(acmd);
1154 
1155 	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1156 
1157 	/*
1158 	 * Check if the command was already completed by the
1159 	 * mrsas_tbolt_build_cmd() routine: in that case busy_flag is clear,
1160 	 * scb is NULL, and an appropriate reason is set in pkt_reason.
1161 	 */
1162 	if (cmd_done) {
1163 		pkt->pkt_reason = CMD_CMPLT;
1164 		pkt->pkt_scbp[0] = STATUS_GOOD;
1165 		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1166 		    | STATE_SENT_CMD;
1167 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1168 			(*pkt->pkt_comp)(pkt);
1169 		}
1170 
1171 		return (TRAN_ACCEPT);
1172 	}
1173 
1174 	if (cmd == NULL) {
1175 		return (TRAN_BUSY);
1176 	}
1177 
1178 
1179 	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1180 		if (instance->fw_outstanding > instance->max_fw_cmds) {
1181 			dev_err(instance->dip, CE_WARN,
1182 			    "Command Queue Full... Returning BUSY");
1183 			DTRACE_PROBE2(tbolt_start_tran_err,
1184 			    uint16_t, instance->fw_outstanding,
1185 			    uint16_t, instance->max_fw_cmds);
1186 			return_raid_msg_pkt(instance, cmd);
1187 			return (TRAN_BUSY);
1188 		}
1189 
1190 		/* Synchronize the Cmd frame for the controller */
1191 		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1192 		    DDI_DMA_SYNC_FORDEV);
1193 
1194 		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1195 		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1196 		    cmd->index, cmd->SMID));
1197 
1198 		instance->func_ptr->issue_cmd(cmd, instance);
1199 	} else {
1200 		instance->func_ptr->issue_cmd(cmd, instance);
1201 		(void) wait_for_outstanding_poll_io(instance);
1202 		(void) mrsas_common_check(instance, cmd);
1203 		DTRACE_PROBE2(tbolt_start_nointr_done,
1204 		    uint8_t, cmd->frame->hdr.cmd,
1205 		    uint8_t, cmd->frame->hdr.cmd_status);
1206 	}
1207 
1208 	return (TRAN_ACCEPT);
1209 }
1210 
1211 /*
1212  * prepare the pkt:
1213  * the pkt may have been resubmitted or just reused so
1214  * initialize some fields and do some checks.
1215  */
1216 static int
1217 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1218 {
1219 	struct scsi_pkt	*pkt = CMD2PKT(acmd);
1220 
1221 
1222 	/*
1223 	 * Reinitialize some fields that need it; the packet may
1224 	 * have been resubmitted
1225 	 */
1226 	pkt->pkt_reason = CMD_CMPLT;
1227 	pkt->pkt_state = 0;
1228 	pkt->pkt_statistics = 0;
1229 	pkt->pkt_resid = 0;
1230 
1231 	/*
1232 	 * zero status byte.
1233 	 */
1234 	*(pkt->pkt_scbp) = 0;
1235 
1236 	return (0);
1237 }
1238 
1239 
1240 int
1241 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1242     struct scsa_cmd *acmd,
1243     struct mrsas_cmd *cmd,
1244     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1245     uint32_t *datalen)
1246 {
1247 	uint32_t		MaxSGEs;
1248 	int			sg_to_process;
1249 	uint32_t		i, j;
1250 	uint32_t		numElements, endElement;
1251 	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
1252 	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
1253 	ddi_acc_handle_t acc_handle =
1254 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1255 	uint16_t		devid = instance->device_id;
1256 
1257 	con_log(CL_ANN1, (CE_NOTE,
1258 	    "chkpnt: Building Chained SGL :%d", __LINE__));
1259 
1260 	/* Calculate SGE size in number of Words (32bit) */
1261 	/* Clear the datalen before updating it. */
1262 	*datalen = 0;
1263 
1264 	MaxSGEs = instance->max_sge_in_main_msg;
1265 
1266 	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1267 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1268 
1269 	/* set data transfer flag. */
1270 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1271 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1272 		    MPI2_SCSIIO_CONTROL_WRITE);
1273 	} else {
1274 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1275 		    MPI2_SCSIIO_CONTROL_READ);
1276 	}
1277 
1278 
1279 	numElements = acmd->cmd_cookiecnt;
1280 
1281 	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1282 
1283 	if (numElements > instance->max_num_sge) {
1284 		con_log(CL_ANN, (CE_NOTE,
1285 		    "[Max SGE Count Exceeded]:%x", numElements));
1286 		return (numElements);
1287 	}
1288 
1289 	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1290 	    (uint8_t)numElements);
1291 
1292 	/* set end element in main message frame */
1293 	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
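	/*
	 * Worked example: with MaxSGEs == 8 and numElements == 10, the
	 * first 7 SGEs go in the main frame (endElement == 7) and the
	 * remaining 3 are built in the chained buffer below.
	 */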
1294 
1295 	/* prepare the scatter-gather list for the firmware */
1296 	scsi_raid_io_sgl_ieee =
1297 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1298 
1299 	if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1300 	    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1301 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1302 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1303 
1304 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1305 	}
1306 
1307 	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1308 		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1309 		    acmd->cmd_dmacookies[i].dmac_laddress);
1310 
1311 		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1312 		    acmd->cmd_dmacookies[i].dmac_size);
1313 
1314 		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1315 
1316 		if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1317 		    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1318 			if (i == (numElements - 1)) {
1319 				ddi_put8(acc_handle,
1320 				    &scsi_raid_io_sgl_ieee->Flags,
1321 				    IEEE_SGE_FLAGS_END_OF_LIST);
1322 			}
1323 		}
1324 
1325 		*datalen += acmd->cmd_dmacookies[i].dmac_size;
1326 
1327 #ifdef DEBUG
1328 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1329 		    scsi_raid_io_sgl_ieee->Address));
1330 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1331 		    scsi_raid_io_sgl_ieee->Length));
1332 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1333 		    scsi_raid_io_sgl_ieee->Flags));
1334 #endif
1335 
1336 	}
1337 
1338 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1339 
1340 	/* check if chained SGL required */
1341 	if (i < numElements) {
1342 
1343 		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1344 
1345 		if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1346 		    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1347 			uint16_t ioFlags =
1348 			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1349 
1350 			if ((ioFlags &
1351 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1352 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1353 				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1354 				    (U8)instance->chain_offset_io_req);
1355 			} else {
1356 				ddi_put8(acc_handle,
1357 				    &scsi_raid_io->ChainOffset, 0);
1358 			}
1359 		} else {
1360 			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1361 			    (U8)instance->chain_offset_io_req);
1362 		}
1363 
1364 		/* prepare physical chain element */
1365 		ieeeChainElement = scsi_raid_io_sgl_ieee;
1366 
1367 		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1368 
1369 		if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1370 		    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1371 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1372 			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1373 		} else {
1374 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1375 			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1376 			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1377 		}
1378 
1379 		ddi_put32(acc_handle, &ieeeChainElement->Length,
1380 		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1381 
1382 		ddi_put64(acc_handle, &ieeeChainElement->Address,
1383 		    (U64)cmd->sgl_phys_addr);
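		/*
		 * The chain element points at this command's preallocated
		 * chained-SGL buffer (cmd->sgl, carved out of the MPI2
		 * frame pool in create_mpi2_frame_pool()).
		 */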
1384 
1385 		sg_to_process = numElements - i;
1386 
1387 		con_log(CL_ANN1, (CE_NOTE,
1388 		    "[Additional SGE Count]:%x", endElement));
1389 
1390 		/* point to the chained SGL buffer */
1391 		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1392 
1393 		/* build rest of the SGL in chained buffer */
1394 		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1395 			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1396 
1397 			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1398 			    acmd->cmd_dmacookies[i].dmac_laddress);
1399 
1400 			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1401 			    acmd->cmd_dmacookies[i].dmac_size);
1402 
1403 			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1404 
1405 			if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1406 			    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1407 				if (i == (numElements - 1)) {
1408 					ddi_put8(acc_handle,
1409 					    &scsi_raid_io_sgl_ieee->Flags,
1410 					    IEEE_SGE_FLAGS_END_OF_LIST);
1411 				}
1412 			}
1413 
1414 			*datalen += acmd->cmd_dmacookies[i].dmac_size;
1415 
1416 #ifdef DEBUG
1417 			con_log(CL_DLEVEL1, (CE_NOTE,
1418 			    "[SGL Address]: %" PRIx64,
1419 			    scsi_raid_io_sgl_ieee->Address));
1420 			con_log(CL_DLEVEL1, (CE_NOTE,
1421 			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1422 			con_log(CL_DLEVEL1, (CE_NOTE,
1423 			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1424 #endif
1425 
1426 			i++;
1427 		}
1428 	}
1429 
1430 	return (0);
1431 } /* end of mr_sas_tbolt_build_sgl */
1432 
1433 
1434 /*
1435  * build_cmd
1436  */
1437 static struct mrsas_cmd *
1438 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1439     struct scsi_pkt *pkt, uchar_t *cmd_done)
1440 {
1441 	uint8_t		fp_possible = 0;
1442 	uint32_t	index;
1443 	uint32_t	lba_count = 0;
1444 	uint32_t	start_lba_hi = 0;
1445 	uint32_t	start_lba_lo = 0;
1446 	uint16_t	devid = instance->device_id;
1447 	ddi_acc_handle_t acc_handle =
1448 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1449 	struct mrsas_cmd		*cmd = NULL;
1450 	struct scsa_cmd			*acmd = PKT2CMD(pkt);
1451 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
1452 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
1453 	uint32_t			datalen;
1454 	struct IO_REQUEST_INFO io_info;
1455 	MR_FW_RAID_MAP_ALL *local_map_ptr;
1456 	uint16_t pd_cmd_cdblen;
1457 
1458 	con_log(CL_DLEVEL1, (CE_NOTE,
1459 	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1460 
1461 	/* find out if this is a logical or a physical drive command. */
1462 	acmd->islogical = MRDRV_IS_LOGICAL(ap);
1463 	acmd->device_id = MAP_DEVICE_ID(instance, ap);
1464 
1465 	*cmd_done = 0;
1466 
1467 	/* get the command packet */
1468 	if (!(cmd = get_raid_msg_pkt(instance))) {
1469 		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1470 		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1471 		return (NULL);
1472 	}
1473 
1474 	index = cmd->index;
1475 	ReqDescUnion =	mr_sas_get_request_descriptor(instance, index);
1476 	ReqDescUnion->Words = 0;
1477 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1478 	ReqDescUnion->SCSIIO.RequestFlags =
1479 	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1480 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1481 
1482 
1483 	cmd->request_desc = ReqDescUnion;
1484 	cmd->pkt = pkt;
1485 	cmd->cmd = acmd;
1486 
1487 	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1488 	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1489 	    uint16_t, acmd->device_id);
1490 
1491 	/* let's get the command direction */
1492 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1493 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1494 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1495 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1496 			    DDI_DMA_SYNC_FORDEV);
1497 		}
1498 	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1499 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1500 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1501 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1502 			    DDI_DMA_SYNC_FORCPU);
1503 		}
1504 	} else {
1505 		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1506 	}
1507 
1508 
1509 	/* get SCSI_IO raid message frame pointer */
1510 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1511 
1512 	/* zero out SCSI_IO raid message frame */
1513 	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1514 
1515 	/* Set the ldTargetId set by BuildRaidContext() */
1516 	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1517 	    acmd->device_id);
1518 
1519 	/*  Copy CDB to scsi_io_request message frame */
1520 	ddi_rep_put8(acc_handle,
1521 	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1522 	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1523 
1524 	/*
1525 	 * Just the CDB length, rest of the Flags are zero
1526 	 * This will be modified later.
1527 	 */
1528 	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1529 
1530 	pd_cmd_cdblen = acmd->cmd_cdblen;
1531 
1532 	if (acmd->islogical) {
1533 
1534 		switch (pkt->pkt_cdbp[0]) {
1535 		case SCMD_READ:
1536 		case SCMD_WRITE:
1537 		case SCMD_READ_G1:
1538 		case SCMD_WRITE_G1:
1539 		case SCMD_READ_G4:
1540 		case SCMD_WRITE_G4:
1541 		case SCMD_READ_G5:
1542 		case SCMD_WRITE_G5:
1543 
1544 			/* Initialize sense Information */
1545 			if (cmd->sense1 != NULL)
1546 				bzero(cmd->sense1, SENSE_LENGTH);
1547 			else
1548 				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1549 				    "Sense buffer ptr NULL "));
1550 			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1551 			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1552 
1553 			if (acmd->cmd_cdblen == CDB_GROUP0) {
1554 				/* 6-byte cdb */
1555 				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1556 				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1557 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1558 				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1559 				    << 16));
1560 			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
1561 				/* 10-byte cdb */
1562 				lba_count =
1563 				    (((uint16_t)(pkt->pkt_cdbp[8])) |
1564 				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1565 
1566 				start_lba_lo =
1567 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1568 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1569 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1570 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1571 
1572 			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
1573 				/* 12-byte cdb */
1574 				lba_count = (
1575 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1576 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1577 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1578 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1579 
1580 				start_lba_lo =
1581 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1582 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1583 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1584 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1585 
1586 			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
1587 				/* 16-byte cdb */
1588 				lba_count = (
1589 				    ((uint32_t)(pkt->pkt_cdbp[13])) |
1590 				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1591 				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1592 				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1593 
1594 				start_lba_lo = (
1595 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1596 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1597 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1598 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1599 
1600 				start_lba_hi = (
1601 				    ((uint32_t)(pkt->pkt_cdbp[5])) |
1602 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1603 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1604 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1605 			}
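			/*
			 * The shifts above assemble the big-endian LBA and
			 * transfer-length fields of the CDB; 16-byte CDBs
			 * carry a 64-bit LBA split into start_lba_hi and
			 * start_lba_lo.
			 */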
1606 
1607 			if (instance->tbolt &&
1608 			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1609 				dev_err(instance->dip, CE_WARN,
1610 				    "IO SECTOR COUNT exceeds "
1611 				    "controller limit 0x%x sectors",
1612 				    lba_count);
1613 			}
1614 
1615 			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1616 			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1617 			    start_lba_lo;
1618 			io_info.numBlocks = lba_count;
1619 			io_info.ldTgtId = acmd->device_id;
1620 
1621 			if (acmd->cmd_flags & CFLAG_DMASEND)
1622 				io_info.isRead = 0;
1623 			else
1624 				io_info.isRead = 1;
1625 
1626 
1627 			/* Acquire SYNC MAP UPDATE lock */
1628 			mutex_enter(&instance->sync_map_mtx);
1629 
1630 			local_map_ptr =
1631 			    instance->ld_map[(instance->map_id & 1)];
1632 
1633 			if ((MR_TargetIdToLdGet(
1634 			    acmd->device_id, local_map_ptr) >=
1635 			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1636 				dev_err(instance->dip, CE_NOTE,
1637 				    "Fast Path NOT Possible, "
1638 				    "targetId >= MAX_LOGICAL_DRIVES || "
1639 				    "!instance->fast_path_io");
1640 				fp_possible = 0;
1641 				/* Set Regionlock flags to BYPASS */
1642 				/* io_request->RaidContext.regLockFlags  = 0; */
1643 				ddi_put8(acc_handle,
1644 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1645 			} else {
1646 				if (MR_BuildRaidContext(instance, &io_info,
1647 				    &scsi_raid_io->RaidContext, local_map_ptr))
1648 					fp_possible = io_info.fpOkForIo;
1649 			}
1650 
1651 			if (!enable_fp)
1652 				fp_possible = 0;
1653 
1654 			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1655 			    "instance->fast_path_io %d fp_possible %d",
1656 			    enable_fp, instance->fast_path_io, fp_possible));
1657 
1658 		if (fp_possible) {
1659 
1660 			/* Check for DIF enabled LD */
1661 			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1662 				/* Prepare 32 Byte CDB for DIF capable Disk */
1663 				mrsas_tbolt_prepare_cdb(instance,
1664 				    scsi_raid_io->CDB.CDB32,
1665 				    &io_info, scsi_raid_io, start_lba_lo);
1666 			} else {
1667 				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1668 				    (uint8_t *)&pd_cmd_cdblen,
1669 				    io_info.pdBlock, io_info.numBlocks);
1670 				ddi_put16(acc_handle,
1671 				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1672 			}
1673 
1674 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1675 			    MPI2_FUNCTION_SCSI_IO_REQUEST);
1676 
1677 			ReqDescUnion->SCSIIO.RequestFlags =
1678 			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1679 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1680 
1681 			if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1682 			    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1683 				uint8_t regLockFlags = ddi_get8(acc_handle,
1684 				    &scsi_raid_io->RaidContext.regLockFlags);
1685 				uint16_t IoFlags = ddi_get16(acc_handle,
1686 				    &scsi_raid_io->IoFlags);
1687 
1688 				if (regLockFlags == REGION_TYPE_UNUSED)
1689 					ReqDescUnion->SCSIIO.RequestFlags =
1690 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1691 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1692 
1693 				IoFlags |=
1694 				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1695 				regLockFlags |=
1696 				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1697 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1698 
1699 				ddi_put8(acc_handle,
1700 				    &scsi_raid_io->ChainOffset, 0);
1701 				ddi_put8(acc_handle,
1702 				    &scsi_raid_io->RaidContext.nsegType,
1703 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1704 				    MPI2_TYPE_CUDA));
1705 				ddi_put8(acc_handle,
1706 				    &scsi_raid_io->RaidContext.regLockFlags,
1707 				    regLockFlags);
1708 				ddi_put16(acc_handle,
1709 				    &scsi_raid_io->IoFlags, IoFlags);
1710 			}
1711 
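			/*
			 * For fast-path reads on load-balanced (RAID-1) LDs,
			 * pick the less busy arm and use its device handle.
			 */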
1712 			if ((instance->load_balance_info[
1713 			    acmd->device_id].loadBalanceFlag) &&
1714 			    (io_info.isRead)) {
1715 				io_info.devHandle =
1716 				    get_updated_dev_handle(&instance->
1717 				    load_balance_info[acmd->device_id],
1718 				    &io_info);
1719 				cmd->load_balance_flag |=
1720 				    MEGASAS_LOAD_BALANCE_FLAG;
1721 			} else {
1722 				cmd->load_balance_flag &=
1723 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1724 			}
1725 
1726 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1727 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1728 			    io_info.devHandle);
1729 
1730 		} else { /* FP Not Possible */
1731 
1732 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1733 			    MPI2_FUNCTION_LD_IO_REQUEST);
1734 
1735 			ddi_put16(acc_handle,
1736 			    &scsi_raid_io->DevHandle, acmd->device_id);
1737 
1738 			ReqDescUnion->SCSIIO.RequestFlags =
1739 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1740 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1741 
1742 			ddi_put16(acc_handle,
1743 			    &scsi_raid_io->RaidContext.timeoutValue,
1744 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1745 
1746 			if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1747 			    (devid == PCI_DEVICE_ID_LSI_FURY)) {
1748 				uint8_t regLockFlags = ddi_get8(acc_handle,
1749 				    &scsi_raid_io->RaidContext.regLockFlags);
1750 
1751 				if (regLockFlags == REGION_TYPE_UNUSED) {
1752 					ReqDescUnion->SCSIIO.RequestFlags =
1753 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1754 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1755 				}
1756 
1757 				regLockFlags |=
1758 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1759 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1760 
1761 				ddi_put8(acc_handle,
1762 				    &scsi_raid_io->RaidContext.nsegType,
1763 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1764 				    MPI2_TYPE_CUDA));
1765 				ddi_put8(acc_handle,
1766 				    &scsi_raid_io->RaidContext.regLockFlags,
1767 				    regLockFlags);
1768 			}
1769 		} /* Not FP */
1770 
1771 		/* Release SYNC MAP UPDATE lock */
1772 		mutex_exit(&instance->sync_map_mtx);
1773 
1774 		break;
1775 
1776 		case SCMD_SYNCHRONIZE_CACHE: {
1777 			return_raid_msg_pkt(instance, cmd);
1778 			*cmd_done = 1;
1779 			return (NULL);
1780 		}
1781 
1782 		case SCMD_MODE_SENSE:
1783 		case SCMD_MODE_SENSE_G1: {
1784 			union scsi_cdb	*cdbp;
1785 			uint16_t	page_code;
1786 
1787 			cdbp = (void *)pkt->pkt_cdbp;
1788 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1789 			switch (page_code) {
1790 			case 0x3:
1791 			case 0x4:
1792 				(void) mrsas_mode_sense_build(pkt);
1793 				return_raid_msg_pkt(instance, cmd);
1794 				*cmd_done = 1;
1795 				return (NULL);
1796 			}
1797 			return (cmd);
1798 		}
1799 
1800 		default:
1801 			/* Pass-through command to logical drive */
1802 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1803 			    MPI2_FUNCTION_LD_IO_REQUEST);
1804 			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1805 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1806 			    acmd->device_id);
1807 			ReqDescUnion->SCSIIO.RequestFlags =
1808 			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1809 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1810 			break;
1811 		}
1812 	} else { /* Physical */
1813 #ifdef PDSUPPORT
1814 		/* Pass-through command to physical drive */
1815 
1816 		/* Acquire SYNC MAP UPDATE lock */
1817 		mutex_enter(&instance->sync_map_mtx);
1818 
1819 		local_map_ptr = instance->ld_map[instance->map_id & 1];
1820 
1821 		ddi_put8(acc_handle, &scsi_raid_io->Function,
1822 		    MPI2_FUNCTION_SCSI_IO_REQUEST);
1823 
1824 		ReqDescUnion->SCSIIO.RequestFlags =
1825 		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1826 		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1827 
1828 		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1829 		    local_map_ptr->raidMap.
1830 		    devHndlInfo[acmd->device_id].curDevHdl);
1831 
1832 		/* Set regLockFlags to REGION_TYPE_BYPASS */
1833 		ddi_put8(acc_handle,
1834 		    &scsi_raid_io->RaidContext.regLockFlags, 0);
1835 		ddi_put64(acc_handle,
1836 		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1837 		ddi_put32(acc_handle,
1838 		    &scsi_raid_io->RaidContext.regLockLength, 0);
1839 		ddi_put8(acc_handle,
1840 		    &scsi_raid_io->RaidContext.RAIDFlags,
1841 		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1842 		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1843 		ddi_put16(acc_handle,
1844 		    &scsi_raid_io->RaidContext.timeoutValue,
1845 		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1846 		ddi_put16(acc_handle,
1847 		    &scsi_raid_io->RaidContext.ldTargetId,
1848 		    acmd->device_id);
1849 		ddi_put8(acc_handle,
1850 		    &scsi_raid_io->LUN[1], acmd->lun);
1851 
1852 		if (instance->fast_path_io &&
1853 		    ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
1854 		    (instance->device_id == PCI_DEVICE_ID_LSI_FURY))) {
1855 			uint16_t IoFlags = ddi_get16(acc_handle,
1856 			    &scsi_raid_io->IoFlags);
1857 			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1858 			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1859 		}
1860 		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1861 		    local_map_ptr->raidMap.
1862 		    devHndlInfo[acmd->device_id].curDevHdl);
1863 
1864 		/* Release SYNC MAP UPDATE lock */
1865 		mutex_exit(&instance->sync_map_mtx);
1866 #else
1867 		/* If no PD support, return here. */
1868 		return (cmd);
1869 #endif
1870 	}
1871 
1872 	/* Set sense buffer physical address/length in scsi_io_request. */
1873 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1874 	    cmd->sense_phys_addr1);
1875 	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1876 
1877 	/* Construct SGL */
1878 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1879 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1880 
1881 	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1882 	    scsi_raid_io, &datalen);
1883 
1884 	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1885 
1886 	con_log(CL_ANN, (CE_CONT,
1887 	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1888 	    pkt->pkt_cdbp[0], acmd->device_id));
1889 	con_log(CL_DLEVEL1, (CE_CONT,
1890 	    "data length = %x\n",
1891 	    scsi_raid_io->DataLength));
1892 	con_log(CL_DLEVEL1, (CE_CONT,
1893 	    "cdb length = %x\n",
1894 	    acmd->cmd_cdblen));
1895 
1896 	return (cmd);
1897 }
1898 
1899 uint32_t
1900 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1901 {
1902 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1903 }
1904 
1905 void
1906 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1907 {
1908 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1909 	struct scsi_pkt *pkt;
1910 
1911 	atomic_inc_16(&instance->fw_outstanding);
1912 
1913 	con_log(CL_ANN1,
1914 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1915 
1916 	con_log(CL_DLEVEL1, (CE_CONT,
1917 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1918 	con_log(CL_DLEVEL1, (CE_CONT,
1919 	    " [req desc low part] %x \n",
1920 	    (uint_t)(req_desc->Words & 0xffffffff)));
1921 	con_log(CL_DLEVEL1, (CE_CONT,
1922 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1923 	pkt = cmd->pkt;
1924 
1925 	if (pkt) {
1926 		con_log(CL_ANN1, (CE_CONT, "%llx: tbolt_issue_cmd: "
1927 		    "issued cmd to FW: cmd: %p, instance: %p, "
1928 		    "pkt: %p, pkt_time: %x\n",
1929 		    gethrtime(), (void *)cmd, (void *)instance,
1930 		    (void *)pkt, cmd->drv_pkt_time));
1931 		if (instance->adapterresetinprogress) {
1932 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1933 			con_log(CL_ANN, (CE_NOTE,
1934 			    "TBOLT Reset the scsi_pkt timer"));
1935 		} else {
1936 			push_pending_mfi_pkt(instance, cmd);
1937 		}
1938 
1939 	} else {
1940 		con_log(CL_ANN1, (CE_CONT, "%llx: tbolt_issue_cmd: "
1941 		    "issued cmd to FW: cmd: %p, instance: %p "
1942 		    "(no pkt)\n", gethrtime(), (void *)cmd, (void *)instance));
1943 	}
1944 
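	/*
	 * The 64-bit request descriptor reaches the FW as two 32-bit
	 * writes; reg_write_mtx keeps descriptor halves from different
	 * threads from interleaving.
	 */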
1945 	/* Issue the command to the FW */
1946 	mutex_enter(&instance->reg_write_mtx);
1947 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1948 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1949 	mutex_exit(&instance->reg_write_mtx);
1950 }
1951 
1952 /*
1953  * issue_cmd_in_sync_mode
1954  */
1955 int
1956 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1957     struct mrsas_cmd *cmd)
1958 {
1959 	int		i;
1960 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1961 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1962 
1963 	struct mrsas_header	*hdr;
1964 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1965 
1966 	con_log(CL_ANN,
1967 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1968 	    cmd->SMID));
1969 
1970 
1971 	if (instance->adapterresetinprogress) {
1972 		cmd->drv_pkt_time = ddi_get16
1973 		    (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1974 		if (cmd->drv_pkt_time < debug_timeout_g)
1975 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1976 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: "
1977 		    "RESET-IN-PROGRESS, issue cmd & return."));
1978 
1979 		mutex_enter(&instance->reg_write_mtx);
1980 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1981 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1982 		mutex_exit(&instance->reg_write_mtx);
1983 
1984 		return (DDI_SUCCESS);
1985 	} else {
1986 		con_log(CL_ANN1, (CE_NOTE,
1987 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1988 		push_pending_mfi_pkt(instance, cmd);
1989 	}
1990 
1991 	con_log(CL_DLEVEL2, (CE_NOTE,
1992 	    "HighQport offset :%p",
1993 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1994 	con_log(CL_DLEVEL2, (CE_NOTE,
1995 	    "LowQport offset :%p",
1996 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1997 
1998 	cmd->sync_cmd = MRSAS_TRUE;
1999 	cmd->cmd_status =  ENODATA;
2000 
2001 
2002 	mutex_enter(&instance->reg_write_mtx);
2003 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2004 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2005 	mutex_exit(&instance->reg_write_mtx);
2006 
2007 	con_log(CL_ANN1, (CE_NOTE,
2008 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2009 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2010 	    (uint_t)(req_desc->Words & 0xffffffff)));
2011 
2012 	mutex_enter(&instance->int_cmd_mtx);
2013 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2014 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2015 	}
2016 	mutex_exit(&instance->int_cmd_mtx);
2017 
2018 
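	/*
	 * Note that i counts cv_wait() wakeups rather than elapsed
	 * milliseconds, so msecs serves as a wakeup budget here.
	 */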
2019 	if (i < (msecs - 1)) {
2020 		return (DDI_SUCCESS);
2021 	} else {
2022 		return (DDI_FAILURE);
2023 	}
2024 }
2025 
2026 /*
2027  * issue_cmd_in_poll_mode
2028  */
2029 int
2030 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2031     struct mrsas_cmd *cmd)
2032 {
2033 	int		i;
2034 	uint16_t	flags;
2035 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2036 	struct mrsas_header *frame_hdr;
2037 
2038 	con_log(CL_ANN,
2039 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2040 	    cmd->SMID));
2041 
2042 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2043 
2044 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2045 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2046 	    MFI_CMD_STATUS_POLL_MODE);
2047 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2048 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2049 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2050 
2051 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2052 	    (uint_t)(req_desc->Words & 0xffffffff)));
2053 	con_log(CL_ANN1, (CE_NOTE,
2054 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2055 
2056 	/* issue the frame using inbound queue port */
2057 	mutex_enter(&instance->reg_write_mtx);
2058 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2059 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2060 	mutex_exit(&instance->reg_write_mtx);
2061 
2062 	for (i = 0; i < msecs && (
2063 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2064 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2065 		/* wait for cmd_status to change from 0xFF */
2066 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2067 	}
2068 
2069 	DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2070 
2071 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2072 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2073 		con_log(CL_ANN1, (CE_NOTE,
2074 		    " cmd failed %" PRIx64, (req_desc->Words)));
2075 		return (DDI_FAILURE);
2076 	}
2077 
2078 	return (DDI_SUCCESS);
2079 }
2080 
2081 void
2082 tbolt_enable_intr(struct mrsas_instance *instance)
2083 {
2084 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2085 	/* writel(~0, &regs->outbound_intr_status); */
2086 	/* readl(&regs->outbound_intr_status); */
2087 
2088 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2089 
2090 	/* dummy read to force PCI flush */
2091 	(void) RD_OB_INTR_MASK(instance);
2092 
2093 }
2094 
2095 void
2096 tbolt_disable_intr(struct mrsas_instance *instance)
2097 {
2098 	uint32_t mask = 0xFFFFFFFF;
2099 
2100 	WR_OB_INTR_MASK(mask, instance);
2101 
2102 	/* Dummy read to force PCI flush */
2103 
2104 	(void) RD_OB_INTR_MASK(instance);
2105 }
2106 
2107 
2108 int
2109 tbolt_intr_ack(struct mrsas_instance *instance)
2110 {
2111 	uint32_t	status;
2112 
2113 	/* check if it is our interrupt */
2114 	status = RD_OB_INTR_STATUS(instance);
2115 	con_log(CL_ANN1, (CE_NOTE,
2116 	    "chkpnt: Entered tbolt_intr_ack status = %d", status));
2117 
2118 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2119 		return (DDI_INTR_UNCLAIMED);
2120 	}
2121 
2122 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2123 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2124 		return (DDI_INTR_UNCLAIMED);
2125 	}
2126 
2127 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2128 		/* clear the interrupt by writing back the same value */
2129 		WR_OB_INTR_STATUS(status, instance);
2130 		/* dummy READ */
2131 		(void) RD_OB_INTR_STATUS(instance);
2132 	}
2133 	return (DDI_INTR_CLAIMED);
2134 }
2135 
2136 /*
2137  * get_raid_msg_pkt : Get a command from the free pool.
2138  * After successful allocation, the caller of this routine
2139  * must clear the frame buffer (zero it) before using the
2140  * packet any further.
2141  *
2142  * ***** Note *****
2143  * After clearing the frame buffer, the context id of the
2144  * frame buffer SHOULD be restored.
2145  */
2146 
2147 struct mrsas_cmd *
2148 get_raid_msg_pkt(struct mrsas_instance *instance)
2149 {
2150 	mlist_t			*head = &instance->cmd_pool_list;
2151 	struct mrsas_cmd	*cmd = NULL;
2152 
2153 	mutex_enter(&instance->cmd_pool_mtx);
2154 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2155 
2156 
2157 	if (!mlist_empty(head)) {
2158 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2159 		mlist_del_init(head->next);
2160 	}
2161 	if (cmd != NULL) {
2162 		cmd->pkt = NULL;
2163 		cmd->retry_count_for_ocr = 0;
2164 		cmd->drv_pkt_time = 0;
2165 	}
2166 	mutex_exit(&instance->cmd_pool_mtx);
2167 
2168 	if (cmd != NULL)
2169 		bzero(cmd->scsi_io_request,
2170 		    sizeof (Mpi2RaidSCSIIORequest_t));
2171 	return (cmd);
2172 }
2173 
2174 struct mrsas_cmd *
2175 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2176 {
2177 	mlist_t			*head = &instance->cmd_app_pool_list;
2178 	struct mrsas_cmd	*cmd = NULL;
2179 
2180 	mutex_enter(&instance->cmd_app_pool_mtx);
2181 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2182 
2183 	if (!mlist_empty(head)) {
2184 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2185 		mlist_del_init(head->next);
2186 	}
2187 	if (cmd != NULL) {
2188 		cmd->retry_count_for_ocr = 0;
2189 		cmd->drv_pkt_time = 0;
2190 		cmd->pkt = NULL;
2191 		cmd->request_desc = NULL;
2192 
2193 	}
2194 
2195 	mutex_exit(&instance->cmd_app_pool_mtx);
2196 
2197 	if (cmd != NULL) {
2198 		bzero(cmd->scsi_io_request,
2199 		    sizeof (Mpi2RaidSCSIIORequest_t));
2200 	}
2201 
2202 	return (cmd);
2203 }
2204 
2205 /*
2206  * return_raid_msg_pkt : Return a cmd to the free command pool
2207  */
2208 void
2209 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2210 {
2211 	mutex_enter(&instance->cmd_pool_mtx);
2212 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2213 
2214 
2215 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2216 
2217 	mutex_exit(&instance->cmd_pool_mtx);
2218 }
2219 
2220 void
2221 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2222 {
2223 	mutex_enter(&instance->cmd_app_pool_mtx);
2224 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2225 
2226 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2227 
2228 	mutex_exit(&instance->cmd_app_pool_mtx);
2229 }
2230 
2231 
2232 void
2233 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2234     struct mrsas_cmd *cmd)
2235 {
2236 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2237 	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
2238 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
2239 	uint32_t			index;
2240 	ddi_acc_handle_t acc_handle =
2241 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2242 
2243 	if (!instance->tbolt) {
2244 		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2245 		return;
2246 	}
2247 
2248 	index = cmd->index;
2249 
2250 	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2251 
2252 	if (!ReqDescUnion) {
2253 		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2254 		return;
2255 	}
2256 
2257 	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2258 
2259 	ReqDescUnion->Words = 0;
2260 
2261 	ReqDescUnion->SCSIIO.RequestFlags =
2262 	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2263 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2264 
2265 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2266 
2267 	cmd->request_desc = ReqDescUnion;
2268 
2269 	/* get raid message frame pointer */
2270 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2271 
2272 	if ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
2273 	    (instance->device_id == PCI_DEVICE_ID_LSI_FURY)) {
2274 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2275 		    &scsi_raid_io->SGL.IeeeChain;
2276 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2277 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2278 	}
2279 
2280 	ddi_put8(acc_handle, &scsi_raid_io->Function,
2281 	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2282 
2283 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2284 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2285 
2286 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2287 	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2288 
2289 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2290 	    cmd->sense_phys_addr1);
2291 
2292 
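	/*
	 * Point a single chain SGE at the legacy MFI frame so that the
	 * FW fetches the wrapped MFI command from host memory.
	 */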
2293 	scsi_raid_io_sgl_ieee =
2294 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2295 
2296 	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2297 	    (U64)cmd->frame_phys_addr);
2298 
2299 	ddi_put8(acc_handle,
2300 	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2301 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2302 	/* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2303 	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2304 
2305 	con_log(CL_ANN1, (CE_NOTE,
2306 	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
2307 	    scsi_raid_io_sgl_ieee->Address));
2308 	con_log(CL_ANN1, (CE_NOTE,
2309 	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2310 	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2311 	    scsi_raid_io_sgl_ieee->Flags));
2312 }
2313 
2314 
2315 void
2316 tbolt_complete_cmd(struct mrsas_instance *instance,
2317     struct mrsas_cmd *cmd)
2318 {
2319 	uint8_t				status;
2320 	uint8_t				extStatus;
2321 	uint8_t				function;
2322 	uint8_t				arm;
2323 	struct scsa_cmd			*acmd;
2324 	struct scsi_pkt			*pkt;
2325 	struct scsi_arq_status		*arqstat;
2326 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2327 	LD_LOAD_BALANCE_INFO		*lbinfo;
2328 	ddi_acc_handle_t acc_handle =
2329 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2330 
2331 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2332 
2333 	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2334 	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2335 
2336 	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2337 	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2338 
2339 	if (status != MFI_STAT_OK) {
2340 		con_log(CL_ANN, (CE_WARN,
2341 		    "IO Cmd Failed SMID %x", cmd->SMID));
2342 	} else {
2343 		con_log(CL_ANN, (CE_NOTE,
2344 		    "IO Cmd Success  SMID %x", cmd->SMID));
2345 	}
2346 
2347 	/* regular commands */
2348 
2349 	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2350 	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2351 	    uint8_t, status, uint8_t, extStatus);
2352 
2353 	switch (function) {
2354 
2355 	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
2356 		acmd =	(struct scsa_cmd *)cmd->cmd;
2357 		lbinfo = &instance->load_balance_info[acmd->device_id];
2358 
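		/*
		 * For load-balanced reads, drop the pending-command count
		 * on the arm that serviced this request.
		 */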
2359 		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2360 			arm = lbinfo->raid1DevHandle[0] ==
2361 			    scsi_raid_io->DevHandle ? 0 : 1;
2362 
2363 			lbinfo->scsi_pending_cmds[arm]--;
2364 			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2365 		}
2366 		con_log(CL_DLEVEL3, (CE_NOTE,
2367 		    "FastPath IO Completion Success "));
2368 		/* FALLTHRU */
2369 
2370 	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
2371 		acmd =	(struct scsa_cmd *)cmd->cmd;
2372 		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);
2373 
2374 		if (acmd->cmd_flags & CFLAG_DMAVALID) {
2375 			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2376 				(void) ddi_dma_sync(acmd->cmd_dmahandle,
2377 				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
2378 				    DDI_DMA_SYNC_FORCPU);
2379 			}
2380 		}
2381 
2382 		pkt->pkt_reason		= CMD_CMPLT;
2383 		pkt->pkt_statistics	= 0;
2384 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2385 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2386 
2387 		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2388 		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2389 		    ((acmd->islogical) ? "LD" : "PD"),
2390 		    acmd->cmd_dmacount, cmd->SMID, status));
2391 
2392 		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2393 			struct scsi_inquiry	*inq;
2394 
2395 			if (acmd->cmd_dmacount != 0) {
2396 				bp_mapin(acmd->cmd_buf);
2397 				inq = (struct scsi_inquiry *)
2398 				    acmd->cmd_buf->b_un.b_addr;
2399 
2400 				/* don't expose physical drives to OS */
2401 				if (acmd->islogical &&
2402 				    (status == MFI_STAT_OK)) {
2403 					display_scsi_inquiry((caddr_t)inq);
2404 #ifdef PDSUPPORT
2405 				} else if ((status == MFI_STAT_OK) &&
2406 				    inq->inq_dtype == DTYPE_DIRECT) {
2407 					display_scsi_inquiry((caddr_t)inq);
2408 #endif
2409 				} else {
2410 					/* for physical disk */
2411 					status = MFI_STAT_DEVICE_NOT_FOUND;
2412 				}
2413 			}
2414 		}
2415 
2416 		switch (status) {
2417 		case MFI_STAT_OK:
2418 			pkt->pkt_scbp[0] = STATUS_GOOD;
2419 			break;
2420 		case MFI_STAT_LD_CC_IN_PROGRESS:
2421 		case MFI_STAT_LD_RECON_IN_PROGRESS:
2422 			pkt->pkt_scbp[0] = STATUS_GOOD;
2423 			break;
2424 		case MFI_STAT_LD_INIT_IN_PROGRESS:
2425 			pkt->pkt_reason	= CMD_TRAN_ERR;
2426 			break;
2427 		case MFI_STAT_SCSI_IO_FAILED:
2428 			dev_err(instance->dip, CE_WARN,
2429 			    "tbolt_complete_cmd: scsi_io failed");
2430 			pkt->pkt_reason	= CMD_TRAN_ERR;
2431 			break;
2432 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
2433 			con_log(CL_ANN, (CE_WARN,
2434 			    "tbolt_complete_cmd: scsi_done with error"));
2435 
2436 			pkt->pkt_reason	= CMD_CMPLT;
2437 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2438 
2439 			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2440 				con_log(CL_ANN,
2441 				    (CE_WARN, "TEST_UNIT_READY fail"));
2442 			} else {
2443 				pkt->pkt_state |= STATE_ARQ_DONE;
2444 				arqstat = (void *)(pkt->pkt_scbp);
2445 				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2446 				arqstat->sts_rqpkt_resid = 0;
2447 				arqstat->sts_rqpkt_state |=
2448 				    STATE_GOT_BUS | STATE_GOT_TARGET
2449 				    | STATE_SENT_CMD
2450 				    | STATE_XFERRED_DATA;
2451 				*(uint8_t *)&arqstat->sts_rqpkt_status =
2452 				    STATUS_GOOD;
2453 				con_log(CL_ANN1,
2454 				    (CE_NOTE, "Copying Sense data %x",
2455 				    cmd->SMID));
2456 
2457 				ddi_rep_get8(acc_handle,
2458 				    (uint8_t *)&(arqstat->sts_sensedata),
2459 				    cmd->sense1,
2460 				    sizeof (struct scsi_extended_sense),
2461 				    DDI_DEV_AUTOINCR);
2462 
2463 			}
2464 			break;
2465 		case MFI_STAT_LD_OFFLINE:
2466 			dev_err(instance->dip, CE_WARN,
2467 			    "tbolt_complete_cmd: ld offline "
2468 			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2470 			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2471 
2472 			    ddi_get16(acc_handle,
2473 			    &scsi_raid_io->RaidContext.ldTargetId),
2474 
2475 			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2476 
2477 			pkt->pkt_reason	= CMD_DEV_GONE;
2478 			pkt->pkt_statistics  = STAT_DISCON;
2479 			break;
2480 		case MFI_STAT_DEVICE_NOT_FOUND:
2481 			con_log(CL_ANN, (CE_CONT,
2482 			    "tbolt_complete_cmd: device not found error"));
2483 			pkt->pkt_reason	= CMD_DEV_GONE;
2484 			pkt->pkt_statistics  = STAT_DISCON;
2485 			break;
2486 
2487 		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2488 			pkt->pkt_state |= STATE_ARQ_DONE;
2489 			pkt->pkt_reason	= CMD_CMPLT;
2490 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2491 
2492 			arqstat = (void *)(pkt->pkt_scbp);
2493 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
2494 			arqstat->sts_rqpkt_resid = 0;
2495 			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2496 			    | STATE_GOT_TARGET | STATE_SENT_CMD
2497 			    | STATE_XFERRED_DATA;
2498 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2499 
2500 			arqstat->sts_sensedata.es_valid = 1;
2501 			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2502 			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2503 
2504 			/*
2505 			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2506 			 * ASC: 0x21; ASCQ: 0x00.
2507 			 */
2508 			arqstat->sts_sensedata.es_add_code = 0x21;
2509 			arqstat->sts_sensedata.es_qual_code = 0x00;
2510 			break;
2511 		case MFI_STAT_INVALID_CMD:
2512 		case MFI_STAT_INVALID_DCMD:
2513 		case MFI_STAT_INVALID_PARAMETER:
2514 		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2515 		default:
2516 			dev_err(instance->dip, CE_WARN,
2517 			    "tbolt_complete_cmd: unhandled status 0x%x",
2518 			    status);
2518 			pkt->pkt_reason	= CMD_TRAN_ERR;
2519 
2520 			break;
2521 		}
2522 
2523 		atomic_add_16(&instance->fw_outstanding, (-1));
2524 
2525 		(void) mrsas_common_check(instance, cmd);
2526 		if (acmd->cmd_dmahandle) {
2527 			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2528 			    DDI_SUCCESS) {
2529 				ddi_fm_service_impact(instance->dip,
2530 				    DDI_SERVICE_UNAFFECTED);
2531 				pkt->pkt_reason = CMD_TRAN_ERR;
2532 				pkt->pkt_statistics = 0;
2533 			}
2534 		}
2535 
2536 		/* Call the callback routine */
2537 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2538 			(*pkt->pkt_comp)(pkt);
2539 
2540 		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2541 
2542 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2543 
2544 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2545 
2546 		return_raid_msg_pkt(instance, cmd);
2547 		break;
2548 	}
2549 	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */
2550 
2551 		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2552 		    cmd->frame->dcmd.mbox.b[1] == 1) {
2553 
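			/*
			 * Completion of the pended LDMAP sync: flip map_id
			 * to the newly valid map, revalidate it to decide
			 * fast-path eligibility, and re-arm the sync cmd.
			 */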
2554 			mutex_enter(&instance->sync_map_mtx);
2555 
2556 			con_log(CL_ANN, (CE_NOTE,
2557 			    "LDMAP sync command SMID RECEIVED 0x%X",
2558 			    cmd->SMID));
2559 			if (cmd->frame->hdr.cmd_status != 0) {
2560 				dev_err(instance->dip, CE_WARN,
2561 				    "map sync failed, status = 0x%x.",
2562 				    cmd->frame->hdr.cmd_status);
2563 			} else {
2564 				instance->map_id++;
2565 				con_log(CL_ANN1, (CE_NOTE,
2566 				    "map sync received, switched map_id to %"
2567 				    PRIu64, instance->map_id));
2568 			}
2569 
2570 			if (MR_ValidateMapInfo(
2571 			    instance->ld_map[instance->map_id & 1],
2572 			    instance->load_balance_info)) {
2573 				instance->fast_path_io = 1;
2574 			} else {
2575 				instance->fast_path_io = 0;
2576 			}
2577 
2578 			con_log(CL_ANN, (CE_NOTE,
2579 			    "instance->fast_path_io %d",
2580 			    instance->fast_path_io));
2581 
2582 			instance->unroll.syncCmd = 0;
2583 
2584 			if (instance->map_update_cmd == cmd) {
2585 				return_raid_msg_pkt(instance, cmd);
2586 				atomic_add_16(&instance->fw_outstanding, (-1));
2587 				(void) mrsas_tbolt_sync_map_info(instance);
2588 			}
2589 
2590 			con_log(CL_ANN1, (CE_NOTE,
2591 			    "LDMAP sync completed, ldcount=%d",
2592 			    instance->ld_map[instance->map_id & 1]
2593 			    ->raidMap.ldCount));
2594 			mutex_exit(&instance->sync_map_mtx);
2595 			break;
2596 		}
2597 
2598 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2599 			con_log(CL_ANN1, (CE_CONT,
2600 			    "AEN command SMID RECEIVED 0x%X",
2601 			    cmd->SMID));
2602 			if ((instance->aen_cmd == cmd) &&
2603 			    (instance->aen_cmd->abort_aen)) {
2604 				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2605 				    "aborted_aen returned"));
2606 			} else {
2607 				atomic_add_16(&instance->fw_outstanding, (-1));
2608 				service_mfi_aen(instance, cmd);
2609 			}
2610 		}
2611 
2612 		if (cmd->sync_cmd == MRSAS_TRUE) {
2613 			con_log(CL_ANN1, (CE_CONT,
2614 			    "Sync-mode Command Response SMID RECEIVED 0x%X",
2615 			    cmd->SMID));
2616 
2617 			tbolt_complete_cmd_in_sync_mode(instance, cmd);
2618 		} else {
2619 			con_log(CL_ANN, (CE_CONT,
2620 			    "tbolt_complete_cmd: unexpected completion, "
2621 			    "SMID 0x%X", cmd->SMID));
2622 		}
2623 		break;
2624 	default:
2625 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2626 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2627 
2628 		/* free message */
2629 		con_log(CL_ANN,
2630 		    (CE_NOTE, "tbolt_complete_cmd: unknown function type!"));
2631 		break;
2632 	}
2633 }
2634 
2635 uint_t
2636 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2637 {
2638 	uint8_t				replyType;
2639 	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2640 	Mpi2ReplyDescriptorsUnion_t	*desc;
2641 	uint16_t			smid;
2642 	union desc_value		d_val;
2643 	struct mrsas_cmd		*cmd;
2644 
2645 	struct mrsas_header	*hdr;
2646 	struct scsi_pkt		*pkt;
2647 
2648 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2649 	    0, 0, DDI_DMA_SYNC_FORDEV);
2650 
2651 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2652 	    0, 0, DDI_DMA_SYNC_FORCPU);
2653 
2654 	desc = instance->reply_frame_pool;
2655 	desc += instance->reply_read_index;
2656 
2657 	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2658 	replyType = replyDesc->ReplyFlags &
2659 	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2660 
2661 	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2662 		return (DDI_INTR_UNCLAIMED);
2663 
2664 	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2665 	    != DDI_SUCCESS) {
2666 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2667 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2668 		con_log(CL_ANN1,
2669 		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2670 		    "FMA check failed; returning DDI_INTR_CLAIMED"));
2671 		return (DDI_INTR_CLAIMED);
2672 	}
2673 
2674 	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
2675 	    (void *)desc, desc->Words));
2676 
2677 	d_val.word = desc->Words;
2678 
2679 
2680 	/* Consume reply descriptors until the unused (all 1s) sentinel. */
2681 	while ((d_val.u1.low != 0xffffffff) &&
2682 	    (d_val.u1.high != 0xffffffff)) {
2683 
2684 		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2685 		    0, 0, DDI_DMA_SYNC_FORCPU);
2686 
2687 		smid = replyDesc->SMID;
2688 
2689 		if (!smid || smid > instance->max_fw_cmds + 1) {
2690 			con_log(CL_ANN1, (CE_NOTE,
2691 			    "Reply Desc at Break  = %p	Words = %" PRIx64,
2692 			    (void *)desc, desc->Words));
2693 			break;
2694 		}
2695 
2696 		cmd	= instance->cmd_list[smid - 1];
2697 		if (!cmd) {
2698 			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2699 			    "outstanding_cmd: invalid command "
2700 			    "or poll command received in completion path"));
2701 		} else {
2702 			mutex_enter(&instance->cmd_pend_mtx);
2703 			if (cmd->sync_cmd == MRSAS_TRUE) {
2704 				hdr = (struct mrsas_header *)&cmd->frame->hdr;
2705 				if (hdr) {
2706 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2707 					    "tbolt_process_outstanding_cmd:"
2708 					    " mlist_del_init(&cmd->list)."));
2709 					mlist_del_init(&cmd->list);
2710 				}
2711 			} else {
2712 				pkt = cmd->pkt;
2713 				if (pkt) {
2714 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2715 					    "tbolt_process_outstanding_cmd:"
2716 					    "mlist_del_init(&cmd->list)."));
2717 					mlist_del_init(&cmd->list);
2718 				}
2719 			}
2720 
2721 			mutex_exit(&instance->cmd_pend_mtx);
2722 
2723 			tbolt_complete_cmd(instance, cmd);
2724 		}
2725 		/* set it back to all 1s. */
2726 		desc->Words = -1LL;
2727 
2728 		instance->reply_read_index++;
2729 
2730 		if (instance->reply_read_index >= (instance->reply_q_depth)) {
2731 			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2732 			instance->reply_read_index = 0;
2733 		}
2734 
2735 		/* Get the next reply descriptor */
2736 		if (!instance->reply_read_index)
2737 			desc = instance->reply_frame_pool;
2738 		else
2739 			desc++;
2740 
2741 		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2742 
2743 		d_val.word = desc->Words;
2744 
2745 		con_log(CL_ANN1, (CE_NOTE,
2746 		    "Next Reply Desc  = %p Words = %" PRIx64,
2747 		    (void *)desc, desc->Words));
2748 
2749 		replyType = replyDesc->ReplyFlags &
2750 		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2751 
2752 		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2753 			break;
2754 
2755 	} /* End of while loop. */
2756 
2757 	/* update replyIndex to FW */
2758 	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2759 
2760 
2761 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2762 	    0, 0, DDI_DMA_SYNC_FORDEV);
2763 
2764 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2765 	    0, 0, DDI_DMA_SYNC_FORCPU);
2766 	return (DDI_INTR_CLAIMED);
2767 }
2768 
2769 
2770 
2771 
2772 /*
2773  * tbolt_complete_cmd_in_sync_mode -	Completes an internal command
2774  * @instance:			Adapter soft state
2775  * @cmd:			Command to be completed
2776  *
2777  * tbolt_issue_cmd_in_sync_mode() waits for a command to complete after
2778  * issuing it.  This function wakes that waiting routine by broadcasting
2779  * on the int_cmd_cv condition variable.
2780  */
2781 void
2782 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2783     struct mrsas_cmd *cmd)
2784 {
2785 
2786 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2787 	    &cmd->frame->io.cmd_status);
2788 
2789 	cmd->sync_cmd = MRSAS_FALSE;
2790 
2791 	mutex_enter(&instance->int_cmd_mtx);
2792 	if (cmd->cmd_status == ENODATA) {
2793 		cmd->cmd_status = 0;
2794 	}
2795 	cv_broadcast(&instance->int_cmd_cv);
2796 	mutex_exit(&instance->int_cmd_mtx);
2797 
2798 }
2799 
2800 /*
2801  * mrsas_tbolt_get_ld_map_info -	Fetches the FW's ld_map structure
2802  * instance:				Adapter soft state
2803  *
2804  * Issues an internal command (DCMD) to read the FW's RAID map.  The
2805  * map is later validated to decide whether fast-path I/O can be used.
2807  */
2808 int
2809 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2810 {
2811 	int ret = 0;
2812 	struct mrsas_cmd	*cmd = NULL;
2813 	struct mrsas_dcmd_frame	*dcmd;
2814 	MR_FW_RAID_MAP_ALL *ci;
2815 	uint32_t ci_h = 0;
2816 	U32 size_map_info;
2817 
2818 	cmd = get_raid_msg_pkt(instance);
2819 
2820 	if (cmd == NULL) {
2821 		dev_err(instance->dip, CE_WARN,
2822 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2823 		return (DDI_FAILURE);
2824 	}
2825 
2826 	dcmd = &cmd->frame->dcmd;
2827 
2828 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2829 	    (sizeof (MR_LD_SPAN_MAP) *
2830 	    (MAX_LOGICAL_DRIVES - 1));
2831 
2832 	con_log(CL_ANN, (CE_NOTE,
2833 	    "size_map_info : 0x%x", size_map_info));
2834 
2835 	ci = instance->ld_map[instance->map_id & 1];
2836 	ci_h = instance->ld_map_phy[instance->map_id & 1];
2837 
2838 	if (!ci) {
2839 		dev_err(instance->dip, CE_WARN,
2840 		    "Failed to alloc mem for ld_map_info");
2841 		return_raid_msg_pkt(instance, cmd);
2842 		return (-1);
2843 	}
2844 
2845 	bzero(ci, sizeof (*ci));
2846 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2847 
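	/* Build a read-direction DCMD that DMAs the RAID map into ci. */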
2848 	dcmd->cmd = MFI_CMD_OP_DCMD;
2849 	dcmd->cmd_status = 0xFF;
2850 	dcmd->sge_count = 1;
2851 	dcmd->flags = MFI_FRAME_DIR_READ;
2852 	dcmd->timeout = 0;
2853 	dcmd->pad_0 = 0;
2854 	dcmd->data_xfer_len = size_map_info;
2855 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2856 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2857 	dcmd->sgl.sge32[0].length = size_map_info;
2858 
2859 
2860 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2861 
2862 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2863 		ret = 0;
2864 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2865 	} else {
2866 		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2867 		ret = -1;
2868 	}
2869 
2870 	return_raid_msg_pkt(instance, cmd);
2871 
2872 	return (ret);
2873 }
2874 
2875 void
2876 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2877 {
2878 	uint32_t i;
2879 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2880 	union desc_value d_val;
2881 
2882 	reply_desc = instance->reply_frame_pool;
2883 
2884 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2885 		d_val.word = reply_desc->Words;
2886 		con_log(CL_DLEVEL3, (CE_NOTE,
2887 		    "i=%d, %x:%x",
2888 		    i, d_val.u1.high, d_val.u1.low));
2889 	}
2890 }
2891 
2892 /*
2893  * mrsas_tbolt_prepare_cdb -	Prepare a 32-byte CDB for fast path.
2894  * @io_info:	MegaRAID IO request packet pointer.
2895  * @ref_tag:	Reference tag for RD/WRPROTECT
2896  *
2897  * Builds the 32-byte DIF CDB and EEDP settings for a fast-path command.
2898  */
2899 void
2900 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2901     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2902     U32 ref_tag)
2903 {
2904 	uint16_t		EEDPFlags;
2905 	uint32_t		Control;
2906 	ddi_acc_handle_t acc_handle =
2907 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2908 
2909 	/* Prepare 32-byte CDB if DIF is supported on this device */
2910 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2911 
2912 	bzero(cdb, 32);
2913 
2914 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2915 
2916 
2917 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2918 
2919 	if (io_info->isRead)
2920 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2921 	else
2922 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2923 
2924 	/* The Linux driver sets this to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2925 	cdb[10] = MRSAS_RD_WR_PROTECT;
2926 
2927 	/* LOGICAL BLOCK ADDRESS */
2928 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2929 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2930 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2931 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2932 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2933 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2934 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2935 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2936 
2937 	/* Logical block reference tag */
2938 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2939 	    BE_32(ref_tag));
2940 
2941 	ddi_put16(acc_handle,
2942 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2943 
2944 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2945 	    ((io_info->numBlocks)*512));
2946 	/* Specify 32-byte cdb */
2947 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2948 
2949 	/* Transfer length */
2950 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2951 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2952 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2953 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2954 
2955 	/* set SCSI IO EEDPFlags */
2956 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2957 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2958 
2959 	/* set SCSI IO EEDPFlags bits */
2960 	if (io_info->isRead) {
2961 		/*
2962 		 * For READ commands, the EEDPFlags shall be set to specify to
2963 		 * Increment the Primary Reference Tag, to Check the Reference
2964 		 * Tag, and to Check and Remove the Protection Information
2965 		 * fields.
2966 		 */
2967 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2968 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2969 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2970 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2971 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2972 	} else {
2973 		/*
2974 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2975 		 * Increment the Primary Reference Tag, and to Insert
2976 		 * Protection Information fields.
2977 		 */
2978 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2979 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2980 	}
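	/*
	 * Protection-related Control bits carried over from the LSI
	 * reference driver; the exact field is not documented here.
	 */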
2981 	Control |= (0x4 << 26);
2982 
2983 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2984 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2985 	ddi_put32(acc_handle,
2986 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2987 }
2988 
2989 
2990 /*
2991  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2992  * @cdb:		CDB
2993  * @cdb_len:		cdb length
2994  * @start_blk:		Start block of IO
2995  * @num_blocks:		Number of blocks to transfer
2996  *
2997  * Used to set the PD LBA in the CDB for FP IOs, converting the CDB
2998  * format when the LBA or CDB size requires it.
2997  */
2998 static void
2999 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3000     U32 num_blocks)
3001 {
3002 	U8 cdb_len = *cdb_len_ptr;
3003 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3004 
3005 	/* Some drives don't support 16/12 byte CDBs; convert to 10. */
3006 	if (((cdb_len == 12) || (cdb_len == 16)) &&
3007 	    (start_blk <= 0xffffffff)) {
3008 		if (cdb_len == 16) {
3009 			con_log(CL_ANN, (CE_NOTE,
3010 			    "Converting READ/WRITE(16) to READ/WRITE(10)"));
3011 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3012 			flagvals = cdb[1];
3013 			groupnum = cdb[14];
3014 			control = cdb[15];
3015 		} else {
3016 			con_log(CL_ANN, (CE_NOTE,
3017 			    "Converting READ/WRITE(12) to READ/WRITE(10)"));
3018 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3019 			flagvals = cdb[1];
3020 			groupnum = cdb[10];
3021 			control = cdb[11];
3022 		}
3023 
3024 		/* Clear the full CDB; sizeof (cdb) is just a pointer here. */
3025 		bzero(cdb, 16);
3025 
3026 		cdb[0] = opcode;
3027 		cdb[1] = flagvals;
3028 		cdb[6] = groupnum;
3029 		cdb[9] = control;
3030 		/* Set transfer length */
3031 		cdb[8] = (U8)(num_blocks & 0xff);
3032 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3033 		cdb_len = 10;
3034 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3035 		/* Convert to 16 byte CDB for large LBAs */
3036 		con_log(CL_ANN,
3037 		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3038 		switch (cdb_len) {
3039 		case 6:
3040 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3041 			control = cdb[5];
3042 			break;
3043 		case 10:
3044 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3045 			flagvals = cdb[1];
3046 			groupnum = cdb[6];
3047 			control = cdb[9];
3048 			break;
3049 		case 12:
3050 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3051 			flagvals = cdb[1];
3052 			groupnum = cdb[10];
3053 			control = cdb[11];
3054 			break;
3055 		}
3056 
3057 		/* Clear the full CDB; sizeof (cdb) is just a pointer here. */
3058 		bzero(cdb, 16);
3058 
3059 		cdb[0] = opcode;
3060 		cdb[1] = flagvals;
3061 		cdb[14] = groupnum;
3062 		cdb[15] = control;
3063 
3064 		/* Transfer length */
3065 		cdb[13] = (U8)(num_blocks & 0xff);
3066 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3067 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3068 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3069 
3070 		/* Specify 16-byte cdb */
3071 		cdb_len = 16;
3072 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3073 		/* convert to 10 byte CDB */
3074 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3075 		control = cdb[5];
3076 
3077 		/* Clear the full CDB; sizeof (cdb) is just a pointer here. */
3078 		bzero(cdb, 16);
3078 		cdb[0] = opcode;
3079 		cdb[9] = control;
3080 
3081 		/* Set transfer length */
3082 		cdb[8] = (U8)(num_blocks & 0xff);
3083 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3084 
3085 		/* Specify 10-byte cdb */
3086 		cdb_len = 10;
3087 	}
3088 
3089 
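	/*
	 * Example: a READ(6) at start_blk 0x200000 was converted above
	 * to a READ(10), so the 4-byte LBA case below applies.
	 */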
3090 	/* Normal case: load the LBA into whatever CDB format we now have. */
3091 	switch (cdb_len) {
3092 	case 6:
3093 	{
3094 		U8 val = cdb[1] & 0xE0;
3095 		cdb[3] = (U8)(start_blk & 0xff);
3096 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3097 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3098 		break;
3099 	}
3100 	case 10:
3101 		cdb[5] = (U8)(start_blk & 0xff);
3102 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3103 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3104 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3105 		break;
3106 	case 12:
3107 		cdb[5]	  = (U8)(start_blk & 0xff);
3108 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3109 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3110 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3111 		break;
3112 
3113 	case 16:
3114 		cdb[9]	= (U8)(start_blk & 0xff);
3115 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3116 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3117 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3118 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3119 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3120 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3121 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3122 		break;
3123 	}
3124 
3125 	*cdb_len_ptr = cdb_len;
3126 }
3127 
3128 
3129 static int
3130 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3131 {
3132 	MR_FW_RAID_MAP_ALL *ld_map;
3133 
3134 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3135 
3136 		ld_map = instance->ld_map[instance->map_id & 1];
3137 
3138 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3139 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3140 
3141 		if (MR_ValidateMapInfo(
3142 		    instance->ld_map[instance->map_id & 1],
3143 		    instance->load_balance_info)) {
3144 			con_log(CL_ANN,
3145 			    (CE_CONT, "MR_ValidateMapInfo success"));
3146 
3147 			instance->fast_path_io = 1;
3148 			con_log(CL_ANN,
3149 			    (CE_NOTE, "instance->fast_path_io %d",
3150 			    instance->fast_path_io));
3151 
3152 			return (DDI_SUCCESS);
3153 		}
3154 
3155 	}
3156 
3157 	instance->fast_path_io = 0;
3158 	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3159 	con_log(CL_ANN, (CE_NOTE,
3160 	    "instance->fast_path_io %d", instance->fast_path_io));
3161 
3162 	return (DDI_FAILURE);
3163 }
3164 
3165 /*
3166  * Marks the HBA as bad. This is called either when an IO packet
3167  * times out even after 3 FW resets, or when the FW is found to be
3168  * faulted even after 3 consecutive resets.
3169  */
3170 
3171 void
3172 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3173 {
3174 	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3175 
3176 	if (instance->deadadapter == 1)
3177 		return;
3178 
3179 	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3180 	    "Writing to doorbell with MFI_STOP_ADP "));
3181 	mutex_enter(&instance->ocr_flags_mtx);
3182 	instance->deadadapter = 1;
3183 	mutex_exit(&instance->ocr_flags_mtx);
3184 	instance->func_ptr->disable_intr(instance);
3185 	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3186 	/* Flush */
3187 	(void) RD_RESERVED0_REGISTER(instance);
3188 
3189 	(void) mrsas_print_pending_cmds(instance);
3190 	(void) mrsas_complete_pending_cmds(instance);
3191 }
3192 
3193 void
3194 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3195 {
3196 	int i;
3197 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3198 	instance->reply_read_index = 0;
3199 
3200 	/* Initialize every reply descriptor to the unused sentinel. */
3201 	reply_desc = instance->reply_frame_pool;
3202 
3203 	for (i = 0; i < instance->reply_q_depth; i++) {
3204 		reply_desc->Words = (uint64_t)~0;
3205 		reply_desc++;
3206 	}
3207 }
3208 
3209 int
3210 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3211 {
3212 	uint32_t status = 0x00;
3213 	uint32_t retry = 0;
3214 	uint32_t cur_abs_reg_val;
3215 	uint32_t fw_state;
3216 	uint32_t abs_state;
3217 	uint32_t i;
3218 
3219 	con_log(CL_ANN, (CE_NOTE,
3220 	    "mrsas_tbolt_reset_ppc entered"));
3221 
3222 	if (instance->deadadapter == 1) {
3223 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3224 		    "no more resets as HBA has been marked dead ");
3225 		return (DDI_FAILURE);
3226 	}
3227 
3228 	mutex_enter(&instance->ocr_flags_mtx);
3229 	instance->adapterresetinprogress = 1;
3230 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3231 	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3232 	mutex_exit(&instance->ocr_flags_mtx);
3233 
3234 	instance->func_ptr->disable_intr(instance);
3235 
3236 	/* Delay to let in-flight ioctl and I/O commands complete. */
3237 	for (i = 0; i < 3000; i++) {
3238 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3239 	}
3240 
3241 	instance->reply_read_index = 0;
3242 
3243 retry_reset:
3244 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3245 	    "Resetting TBOLT"));
3246 
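	/*
	 * Unlock the host diagnostic register by writing the MPI2
	 * write-sequence key values.
	 */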
3247 	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3248 	WR_TBOLT_IB_WRITE_SEQ(4, instance);
3249 	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3250 	WR_TBOLT_IB_WRITE_SEQ(2, instance);
3251 	WR_TBOLT_IB_WRITE_SEQ(7, instance);
3252 	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3253 	con_log(CL_ANN1, (CE_NOTE,
3254 	    "mrsas_tbolt_reset_ppc: magic number written "
3255 	    "to write sequence register"));
3256 	delay(100 * drv_usectohz(MILLISEC));
3257 	status = RD_TBOLT_HOST_DIAG(instance);
3258 	con_log(CL_ANN1, (CE_NOTE,
3259 	    "mrsas_tbolt_reset_ppc: host diag register read "
3260 	    "successfully"));
3261 
3262 	while (status & DIAG_TBOLT_RESET_ADAPTER) {
3263 		delay(100 * drv_usectohz(MILLISEC));
3264 		status = RD_TBOLT_HOST_DIAG(instance);
3265 		if (retry++ == 100) {
3266 			dev_err(instance->dip, CE_WARN,
3267 			    "mrsas_tbolt_reset_ppc: "
3268 			    "reset adapter bit still set after "
3269 			    "%d retries", retry);
3270 			return (DDI_FAILURE);
3271 		}
3272 	}
3273 
3274 	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3275 	delay(100 * drv_usectohz(MILLISEC));
3276 
3277 	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3278 	    (uint8_t *)((uintptr_t)(instance)->regmap +
3279 	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3280 
3281 	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3282 		delay(100 * drv_usectohz(MILLISEC));
3283 		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3284 		    (uint8_t *)((uintptr_t)(instance)->regmap +
3285 		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3286 		if (retry++ == 100) {
3287 			/* Don't call kill adapter here; the adapter */
3288 			/* reset bit is cleared by the firmware. */
3289 			/* mrsas_tbolt_kill_adapter(instance); */
3290 			dev_err(instance->dip, CE_WARN,
3291 			    "%s(): RESET FAILED; return failure!!!", __func__);
3292 			return (DDI_FAILURE);
3293 		}
3294 	}
3295 
3296 	con_log(CL_ANN,
3297 	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3298 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3299 	    "Calling mfi_state_transition_to_ready"));
3300 
3301 	abs_state = instance->func_ptr->read_fw_status_reg(instance);
3302 	retry = 0;
3303 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3304 		delay(100 * drv_usectohz(MILLISEC));
3305 		abs_state = instance->func_ptr->read_fw_status_reg(instance);
3306 	}
3307 	if (abs_state <= MFI_STATE_FW_INIT) {
3308 		dev_err(instance->dip, CE_WARN,
3309 		    "mrsas_tbolt_reset_ppc: firmware state < "
3310 		    "MFI_STATE_FW_INIT, state = 0x%x; RETRY RESET.", abs_state);
3311 		goto retry_reset;
3312 	}
3313 
3314 	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
3315 	if (mfi_state_transition_to_ready(instance) ||
3316 	    debug_tbolt_fw_faults_after_ocr_g == 1) {
3317 		cur_abs_reg_val =
3318 		    instance->func_ptr->read_fw_status_reg(instance);
3319 		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;
3320 
3321 		con_log(CL_ANN1, (CE_NOTE,
3322 		    "mrsas_tbolt_reset_ppc: (before fault injection) FW is "
3323 		    "not ready, FW state = 0x%x", fw_state));
3324 		if (debug_tbolt_fw_faults_after_ocr_g == 1)
3325 			fw_state = MFI_STATE_FAULT;
3326 
3327 		con_log(CL_ANN,
3328 		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
3329 		    "FW state = 0x%x", fw_state));
3330 
3331 		if (fw_state == MFI_STATE_FAULT) {
3332 			/* increment the count */
3333 			instance->fw_fault_count_after_ocr++;
3334 			if (instance->fw_fault_count_after_ocr
3335 			    < MAX_FW_RESET_COUNT) {
3336 				dev_err(instance->dip, CE_WARN,
3337 				    "mrsas_tbolt_reset_ppc: "
3338 				    "FW is in fault after OCR count %d "
3339 				    "Retry Reset",
3340 				    instance->fw_fault_count_after_ocr);
3341 				goto retry_reset;
3342 
3343 			} else {
3344 				dev_err(instance->dip, CE_WARN, "%s: "
3345 				    "max reset count exceeded (>%d); "
3346 				    "marking HBA bad, KILL adapter",
3347 				    __func__, MAX_FW_RESET_COUNT);
3348 
3349 				mrsas_tbolt_kill_adapter(instance);
3350 				return (DDI_FAILURE);
3351 			}
3352 		}
3353 	}
3354 
3355 	/* reset the counter as FW is up after OCR */
3356 	instance->fw_fault_count_after_ocr = 0;
3357 
3358 	mrsas_reset_reply_desc(instance);
3359 
3360 
3361 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3362 	    "Calling mrsas_issue_init_mpi2"));
3363 	abs_state = mrsas_issue_init_mpi2(instance);
3364 	if (abs_state == (uint32_t)DDI_FAILURE) {
3365 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3366 		    "INIT failed Retrying Reset");
3367 		goto retry_reset;
3368 	}
3369 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3370 	    "mrsas_issue_init_mpi2 Done"));
3371 
3372 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3373 	    "Calling mrsas_print_pending_cmd"));
3374 	(void) mrsas_print_pending_cmds(instance);
3375 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3376 	    "mrsas_print_pending_cmd done"));
3377 
3378 	instance->func_ptr->enable_intr(instance);
3379 	instance->fw_outstanding = 0;
3380 
3381 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3382 	    "Calling mrsas_issue_pending_cmds"));
3383 	(void) mrsas_issue_pending_cmds(instance);
3384 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3385 	    "issue_pending_cmds done."));
3386 
3387 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3388 	    "Calling aen registration"));
3389 
3390 	instance->aen_cmd->retry_count_for_ocr = 0;
3391 	instance->aen_cmd->drv_pkt_time = 0;
3392 
3393 	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3394 
3395 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3396 	mutex_enter(&instance->ocr_flags_mtx);
3397 	instance->adapterresetinprogress = 0;
3398 	mutex_exit(&instance->ocr_flags_mtx);
3399 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3400 	    "adapterresetinprogress flag unset"));
3401 
3402 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3403 	return (DDI_SUCCESS);
3404 
3405 }
3406 
3407 
3408 /*
3409  * mrsas_tbolt_sync_map_info -	Syncs the LD map with the FW
3410  * @instance:				Adapter soft state
3411  *
3412  * Issues a pended internal command (DCMD) that sends each LD's target
3413  * id and sequence number to the FW and arms the next RAID map update
3414  * notification.
3415  */
3416 
3417 static int
3418 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3419 {
3420 	int			ret = 0, i;
3421 	struct mrsas_cmd	*cmd = NULL;
3422 	struct mrsas_dcmd_frame	*dcmd;
3423 	uint32_t size_sync_info, num_lds;
3424 	LD_TARGET_SYNC *ci = NULL;
3425 	MR_FW_RAID_MAP_ALL *map;
3426 	MR_LD_RAID  *raid;
3427 	LD_TARGET_SYNC *ld_sync;
3428 	uint32_t ci_h = 0;
3429 	uint32_t size_map_info;
3430 
3431 	cmd = get_raid_msg_pkt(instance);
3432 
3433 	if (cmd == NULL) {
3434 		dev_err(instance->dip, CE_WARN,
3435 		    "Failed to get a cmd from free-pool in "
3436 		    "mrsas_tbolt_sync_map_info().");
3437 		return (DDI_FAILURE);
3438 	}
3439 
3440 	/* Clear the frame buffer and assign back the context id */
3441 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3442 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3443 	    cmd->index);
3444 	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3445 
3446 
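	/*
	 * map_id selects the currently valid one of the two FW RAID map
	 * copies; the previous copy is reused below as the buffer for
	 * the LD sync data.
	 */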
3447 	map = instance->ld_map[instance->map_id & 1];
3448 
3449 	num_lds = map->raidMap.ldCount;
3450 
3451 	dcmd = &cmd->frame->dcmd;
3452 
3453 	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3454 
3455 	con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
3456 	    size_sync_info, num_lds));
3457 
3458 	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3459 
3460 	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3461 	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3462 
3463 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3464 
3465 	ld_sync = (LD_TARGET_SYNC *)ci;
3466 
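	/*
	 * Fill one LD_TARGET_SYNC entry per LD with its target ID and
	 * current sequence number from the RAID map.
	 */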
3467 	for (i = 0; i < num_lds; i++, ld_sync++) {
3468 		raid = MR_LdRaidGet(i, map);
3469 
3470 		con_log(CL_ANN1,
3471 		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3472 		    i, raid->seqNum, raid->flags.ldSyncRequired));
3473 
3474 		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3475 
3476 		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3477 		    i, ld_sync->ldTargetId));
3478 
3479 		ld_sync->seqNum = raid->seqNum;
3480 	}
3481 
3482 
3483 	size_map_info = sizeof (MR_FW_RAID_MAP) +
3484 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3485 
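	/*
	 * Build the DCMD frame: a single-SGE write of the sync data with
	 * the pend flag set in mbox.b[1], so the FW completes the command
	 * only when the RAID map changes.
	 */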
3486 	dcmd->cmd = MFI_CMD_OP_DCMD;
3487 	dcmd->cmd_status = 0xFF;
3488 	dcmd->sge_count = 1;
3489 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3490 	dcmd->timeout = 0;
3491 	dcmd->pad_0 = 0;
3492 	dcmd->data_xfer_len = size_map_info;
3493 	ASSERT(num_lds <= 255);
3494 	dcmd->mbox.b[0] = (U8)num_lds;
3495 	dcmd->mbox.b[1] = 1; /* Pend */
3496 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3497 	dcmd->sgl.sge32[0].phys_addr = ci_h;
3498 	dcmd->sgl.sge32[0].length = size_map_info;
3499 
3500 
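	/*
	 * Remember the command so that abort_syncmap_cmd() can cancel it
	 * later, and issue it without waiting for completion.
	 */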
3501 	instance->map_update_cmd = cmd;
3502 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3503 
3504 	instance->func_ptr->issue_cmd(cmd, instance);
3505 
3506 	instance->unroll.syncCmd = 1;
3507 	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3508 
3509 	return (ret);
3510 }
3511 
3512 /*
3513  * abort_syncmap_cmd - Abort the pending map-sync DCMD in polled mode.
3514  */
3515 int
3516 abort_syncmap_cmd(struct mrsas_instance *instance,
3517     struct mrsas_cmd *cmd_to_abort)
3518 {
3519 	int	ret = 0;
3520 
3521 	struct mrsas_cmd		*cmd;
3522 	struct mrsas_abort_frame	*abort_fr;
3523 
3524 	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3525 
3526 	cmd = get_raid_msg_mfi_pkt(instance);
3527 
3528 	if (!cmd) {
3529 		dev_err(instance->dip, CE_WARN,
3530 		    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3531 		return (DDI_FAILURE);
3532 	}
3533 	/* Clear the frame buffer and assign back the context id */
3534 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3535 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3536 	    cmd->index);
3537 
3538 	abort_fr = &cmd->frame->abort;
3539 
3540 	/* prepare and issue the abort frame */
3541 	ddi_put8(cmd->frame_dma_obj.acc_handle,
3542 	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
3543 	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3544 	    MFI_CMD_STATUS_SYNC_MODE);
3545 	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3546 	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3547 	    cmd_to_abort->index);
3548 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3549 	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3550 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3551 	    &abort_fr->abort_mfi_phys_addr_hi, 0);
3552 
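	/* Single-frame MFI command, wrapped in an MPI2 pass-through below. */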
3553 	cmd->frame_count = 1;
3554 
3555 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3556 
3557 	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3558 		con_log(CL_ANN1, (CE_WARN,
3559 		    "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3560 		ret = -1;
3561 	} else {
3562 		ret = 0;
3563 	}
3564 
3565 	return_raid_msg_mfi_pkt(instance, cmd);
3566 
3567 	atomic_add_16(&instance->fw_outstanding, (-1));
3568 
3569 	return (ret);
3570 }
3571 
3572 
3573 #ifdef PDSUPPORT
3574 /*
3575  * Even though these functions were originally intended for 2208 only, it
3576  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3577  * these two functions would be either in mr_sas.c, or in their own new source
3578  * file.  Since this driver needs some cleanup anyway, keep this portion in
3579  * mind as well.
3580  */
3581 
3582 int
3583 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3584     uint8_t lun, dev_info_t **ldip)
3585 {
3586 	struct scsi_device *sd;
3587 	dev_info_t *child;
3588 	int rval, dtype;
3589 	struct mrsas_tbolt_pd_info *pds = NULL;
3590 
3591 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3592 	    tgt, lun));
3593 
3594 	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3595 		if (ldip) {
3596 			*ldip = child;
3597 		}
3598 		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3599 			rval = mrsas_service_evt(instance, tgt, 1,
3600 			    MRSAS_EVT_UNCONFIG_TGT, NULL);
3601 			con_log(CL_ANN1, (CE_WARN,
3602 			    "mr_sas: DELETING STALE ENTRY rval = %d "
3603 			    "tgt id = %d", rval, tgt));
3604 			return (NDI_FAILURE);
3605 		}
3606 		return (NDI_SUCCESS);
3607 	}
3608 
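	/* No existing child node: ask the FW for this target's PD info. */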
3609 	pds = (struct mrsas_tbolt_pd_info *)
3610 	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3611 	mrsas_tbolt_get_pd_info(instance, pds, tgt);
3612 	dtype = pds->scsiDevType;
3613 
3614 	/* Check for Disk */
3615 	if (dtype == DTYPE_DIRECT) {
3617 		if (LE_16(pds->fwState) != PD_SYSTEM) {
3618 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3619 			return (NDI_FAILURE);
3620 		}
3621 		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3622 		sd->sd_address.a_hba_tran = instance->tran;
3623 		sd->sd_address.a_target = (uint16_t)tgt;
3624 		sd->sd_address.a_lun = (uint8_t)lun;
3625 
3626 		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3627 			rval = mrsas_config_scsi_device(instance, sd, ldip);
3628 			dev_err(instance->dip, CE_CONT,
3629 			    "?Phys. device found: tgt %d dtype %d: %s\n",
3630 			    tgt, dtype, sd->sd_inq->inq_vid);
3631 		} else {
3632 			rval = NDI_FAILURE;
3633 			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3634 			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3635 			    tgt, dtype, sd->sd_inq->inq_vid));
3636 		}
3637 
3638 		/* sd_unprobe is blank now. Free buffer manually */
3639 		/* sd_unprobe is a no-op; free the inquiry buffer manually. */
3640 			kmem_free(sd->sd_inq, SUN_INQSIZE);
3641 			sd->sd_inq = (struct scsi_inquiry *)NULL;
3642 		}
3643 		kmem_free(sd, sizeof (struct scsi_device));
3644 	} else {
3645 		con_log(CL_ANN1, (CE_NOTE,
3646 		    "?Device not supported: tgt %d lun %d dtype %d",
3647 		    tgt, lun, dtype));
3648 		rval = NDI_FAILURE;
3649 	}
3650 
3651 	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3652 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3653 	    rval));
3654 	return (rval);
3655 }
3656 
3657 static void
3658 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3659     struct mrsas_tbolt_pd_info *pds, int tgt)
3660 {
3661 	struct mrsas_cmd	*cmd;
3662 	struct mrsas_dcmd_frame	*dcmd;
3663 	dma_obj_t		dcmd_dma_obj;
3664 
3665 	ASSERT(instance->tbolt || instance->skinny);
3666 
3667 	if (instance->tbolt)
3668 		cmd = get_raid_msg_pkt(instance);
3669 	else
3670 		cmd = mrsas_get_mfi_pkt(instance);
3671 
3672 	if (!cmd) {
3673 		con_log(CL_ANN1,
3674 		    (CE_WARN, "Failed to get a cmd for get pd info"));
3675 		return;
3676 	}
3677 
3678 	/* Clear the frame buffer and assign back the context id */
3679 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3680 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3681 	    cmd->index);
3682 
3683 
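	/*
	 * Build a single-cookie DMA buffer to receive the PD info, then
	 * construct a read DCMD (MR_DCMD_PD_GET_INFO) around it with the
	 * target ID passed in mbox.w[0].
	 */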
3684 	dcmd = &cmd->frame->dcmd;
3685 	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3686 	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3687 	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3688 	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3689 	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3690 	dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3691 
3692 	if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3693 	    DDI_STRUCTURE_LE_ACC) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_get_pd_info: DMA buffer allocation failed");
		if (instance->tbolt)
			return_raid_msg_pkt(instance, cmd);
		else
			mrsas_return_mfi_pkt(instance, cmd);
		return;
	}
3694 	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3695 	bzero(dcmd->mbox.b, 12);
3696 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3697 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3698 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3699 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3700 	    MFI_FRAME_DIR_READ);
3701 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3702 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3703 	    sizeof (struct mrsas_tbolt_pd_info));
3704 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3705 	    MR_DCMD_PD_GET_INFO);
3706 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3707 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3708 	    sizeof (struct mrsas_tbolt_pd_info));
3709 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3710 	    dcmd_dma_obj.dma_cookie[0].dmac_address);
3711 
3712 	cmd->sync_cmd = MRSAS_TRUE;
3713 	cmd->frame_count = 1;
3714 
3715 	if (instance->tbolt)
3716 		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3717 
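	/*
	 * Issue synchronously, then copy the PD info out of the DMA
	 * buffer into the caller's structure and release the resources.
	 */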
3718 	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3719 
3720 	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3721 	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3722 	    DDI_DEV_AUTOINCR);
3723 	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3724 
3725 	if (instance->tbolt)
3726 		return_raid_msg_pkt(instance, cmd);
3727 	else
3728 		mrsas_return_mfi_pkt(instance, cmd);
3729 }
3730 #endif
3731