xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/lmrc/lmrc_scsa.c (revision b210e77709da8e42dfe621e10ccf4be504206058)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Racktop Systems, Inc.
 */
/*
 * This file implements the basic HBA interface to SCSAv3.
 *
 * For target initialization, we'll look up the driver target state by the
 * device address and set it as HBA private in the struct scsi_device.
 *
 * The tran_reset(9e) and tran_abort(9e) entry points are implemented by a
 * common function that sends the appropriate task management request to the
 * target, iff the target supports task management requests. There is no
 * support for bus resets. The case of RESET_ALL is special: sd(4d) issues a
 * RESET_ALL in sddump() and errors out if that fails, so even if task
 * management is unsupported by a target or the reset fails for any other
 * reason, we return success. Any I/O errors due to an unsuccessful reset will
 * be caught later.
 *
 * The tran_start(9e) code paths are almost identical for physical and logical
 * devices, the major difference being that PDs will have the DevHandle in the
 * MPT I/O frame set to the invalid DevHandle (0xffff), while LDs will use the
 * target ID. Also, special settings are applied for LDs and PDs in the RAID
 * context (VendorRegion of the MPT I/O frame). There is no support for RAID
 * fastpath I/O, although the JBOD sequence number fast path is used for
 * direct-attached PDs where the firmware supports it.
 *
 * In tran_setup_pkt(9e), an MPT command is allocated for the scsi_pkt, and
 * its members are initialized as follows:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the sense DMA memory allocated for the MPT command
 * - pkt_scblen will be set to the size of the sense DMA memory
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, respectively, adjusted to account
 *   for the space needed for the ARQ pkt. (There is no SenseBufferHighAddress.)
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>

#include "lmrc.h"
#include "lmrc_reg.h"

static int lmrc_getcap(struct scsi_address *, char *, int);
static int lmrc_setcap(struct scsi_address *, char *, int, int);

static int lmrc_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void lmrc_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);

static int lmrc_tran_abort(struct scsi_address *, struct scsi_pkt *);
static int lmrc_tran_reset(struct scsi_address *, int);

static int lmrc_tran_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void lmrc_tran_teardown_pkt(struct scsi_pkt *);

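/*
 * Tunable: if B_TRUE, DDI_DMA_RELAXED_ORDERING is set in the transport DMA
 * attributes in lmrc_hba_attach() below.
 */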
boolean_t lmrc_relaxed_ordering = B_TRUE;

static int
lmrc_getcap(struct scsi_address *sa, char *cap, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

	switch (index) {
	case SCSI_CAP_CDB_LEN:
		return (sizeof (((Mpi25SCSIIORequest_t *)NULL)->CDB.CDB32));

	case SCSI_CAP_DMA_MAX:
		if (lmrc->l_dma_attr.dma_attr_maxxfer > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_maxxfer);

	case SCSI_CAP_SECTOR_SIZE:
		if (lmrc->l_dma_attr.dma_attr_granular > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_granular);

	case SCSI_CAP_INTERCONNECT_TYPE: {
		uint8_t interconnect_type;

		rw_enter(&tgt->tgt_lock, RW_READER);
		interconnect_type = tgt->tgt_interconnect_type;
		rw_exit(&tgt->tgt_lock);
		return (interconnect_type);
	}
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		return (1);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		return (0);

	default:
		return (-1);
	}
}

static int
lmrc_setcap(struct scsi_address *sa, char *cap, int value, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

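	/* We do not support setting capabilities for all targets at once. */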
	if (whom == 0)
		return (-1);

	switch (index) {
	case SCSI_CAP_DMA_MAX:
		if (value <= lmrc->l_dma_attr.dma_attr_maxxfer)
			return (1);
		else
			return (0);

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		if (value == 1)
			return (1);
		else
			return (0);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		if (value == 0)
			return (1);
		else
			return (0);

	case SCSI_CAP_SECTOR_SIZE:
	case SCSI_CAP_TOTAL_SECTORS:
		return (0);

	default:
		return (-1);
	}
}

/*
 * lmrc_tran_tgt_init
 *
 * Find the driver target state and link it with the scsi_device.
 */
static int
lmrc_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	lmrc_t *lmrc = hba_tran->tran_hba_private;
	lmrc_tgt_t *tgt;

	VERIFY(lmrc != NULL);

	tgt = lmrc_tgt_find(lmrc, sd);
	if (tgt == NULL)
		return (DDI_FAILURE);

	/* lmrc_tgt_find() returns the target read-locked. */
	scsi_device_hba_private_set(sd, tgt);
	rw_exit(&tgt->tgt_lock);

	return (DDI_SUCCESS);
}

static void
lmrc_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	scsi_device_hba_private_set(sd, NULL);
}

/*
 * lmrc_tran_start
 *
 * Start I/O of a scsi_pkt. Set up the MPT frame, the RAID context and, if
 * necessary, the SGL for the transfer. Wait for a reply if this is polled I/O.
 *
 * There are subtle differences in the way I/O is done for LDs and PDs, which
 * are summarized below.
 *
 * There is no support for RAID fastpath I/O.
 */
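/*
 * A rough summary of the LD vs. PD differences handled below:
 *
 *                       LD I/O                 PD I/O
 *   io_req->Function    LD_IO_REQUEST          LD_IO_REQUEST, or
 *                                              SCSI_IO_REQUEST when using the
 *                                              JBOD sequence number fast path
 *   io_req->DevHandle   target ID              0xffff, or the DevHandle from
 *                                              the PD map for the fast path
 *   RAID context        CUDA type for R/W      SYSTEM_PD io_subtype; CUDA
 *                       commands               type for fast path R/W cmds
 */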
static int
lmrc_tran_start(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	Mpi25SCSIIORequest_t *io_req;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_raidctx_g35_t *rc;
	struct scsi_device *sd;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	uint8_t req_flags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	boolean_t intr = (pkt->pkt_flags & FLAG_NOINTR) == 0;
	int ret = TRAN_BADPKT;

	/*
	 * Reject the packet if FLAG_NOINTR is set but we're not panicking.
	 * This may theoretically happen if scsi_transport() is called from
	 * an interrupt thread, which we don't support.
	 */
	if (!intr && !ddi_in_panic())
		return (ret);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	cmd = pkt->pkt_ha_private;
	VERIFY(cmd != NULL);

	VERIFY(cmd->sc_tgt == tgt);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (TRAN_FATAL_ERROR);

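	/* Throttle outstanding commands against the firmware limit. */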
	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (TRAN_BUSY);
	}

	rw_enter(&tgt->tgt_lock, RW_READER);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);
	mutex_enter(&mpt->mpt_lock);

	io_req = mpt->mpt_io_frame;

	io_req->Function = LMRC_MPI2_FUNCTION_LD_IO_REQUEST;

	rc = &io_req->VendorRegion;
	rc->rc_ld_tgtid = tgt->tgt_dev_id;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rc->rc_timeout = lmrc->l_raidmap->rm_fp_pd_io_timeout;
	rw_exit(&lmrc->l_raidmap_lock);

	if (tgt->tgt_pd_info == NULL) {
		/* This is LD I/O */
		io_req->DevHandle = tgt->tgt_dev_id;

		if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
			rc->rc_type = MPI2_TYPE_CUDA;
			rc->rc_nseg = 1;
			rc->rc_routing_flags.rf_sqn = 1;
		}
	} else {
		/* This is PD I/O */
		io_req->DevHandle = LMRC_DEVHDL_INVALID;
		rc->rc_raid_flags.rf_io_subtype = LMRC_RF_IO_SUBTYPE_SYSTEM_PD;

		if (tgt->tgt_type == DTYPE_DIRECT &&
		    lmrc->l_use_seqnum_jbod_fp) {
			lmrc_pd_cfg_t *pdcfg;

			rw_enter(&lmrc->l_pdmap_lock, RW_READER);
			pdcfg = &lmrc->l_pdmap->pm_pdcfg[tgt->tgt_dev_id];

			if (lmrc->l_pdmap_tgtid_support)
				rc->rc_ld_tgtid = pdcfg->pd_tgtid;

			rc->rc_cfg_seqnum = pdcfg->pd_seqnum;
			io_req->DevHandle = pdcfg->pd_devhdl;
			rw_exit(&lmrc->l_pdmap_lock);

			if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
				/*
				 * MPI2_TYPE_CUDA is only valid if the FW
				 * supports JBOD sequence numbers.
				 */
				rc->rc_type = MPI2_TYPE_CUDA;
				rc->rc_nseg = 1;
				rc->rc_routing_flags.rf_sqn = 1;

				io_req->Function =
				    MPI2_FUNCTION_SCSI_IO_REQUEST;
				io_req->IoFlags |=
				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
				req_flags =
				    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
			}
		}
	}

	if (pkt->pkt_numcookies > 0) {
		if ((pkt->pkt_dma_flags & DDI_DMA_READ) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_READ;

		if ((pkt->pkt_dma_flags & DDI_DMA_WRITE) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_WRITE;

		lmrc_dma_build_sgl(lmrc, mpt, pkt->pkt_cookies,
		    pkt->pkt_numcookies);

		io_req->DataLength = pkt->pkt_dma_len;

		rc->rc_num_sge = pkt->pkt_numcookies;
	}

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt, req_flags);

	mpt->mpt_timeout = gethrtime() + pkt->pkt_time * NANOSEC;
	lmrc_send_atomic_request(lmrc, req_desc);

	if (intr) {
		/* normal interrupt driven I/O processing */
		lmrc_tgt_add_active_mpt(tgt, mpt);
		ret = TRAN_ACCEPT;
	} else {
		/* FLAG_NOINTR was set and we're panicking */
		VERIFY(ddi_in_panic());

		ret = lmrc_poll_for_reply(lmrc, mpt);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	}

	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (ret);
}

/*
 * lmrc_task_mgmt
 *
 * Send a TASK MGMT command to a target, provided it is TM capable.
 */
static int
lmrc_task_mgmt(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t type, uint16_t smid)
{
	Mpi2SCSITaskManagementRequest_t *tm_req;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	uint64_t *pd_ld_flags;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_mpt_cmd_t *mpt;
	clock_t ret;
	boolean_t tm_capable;

	rw_enter(&tgt->tgt_lock, RW_READER);

	/* Make sure the target can handle task mgmt commands. */
	if (tgt->tgt_pd_info == NULL) {
		tm_capable = lmrc_ld_tm_capable(lmrc, tgt->tgt_dev_id);
	} else {
		tm_capable = lmrc_pd_tm_capable(lmrc, tgt->tgt_dev_id);
	}

	if (!tm_capable) {
		rw_exit(&tgt->tgt_lock);
		return (0);
	}

	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		rw_exit(&tgt->tgt_lock);
		return (0);
	}

	mpt = lmrc_get_mpt(lmrc);
	ASSERT(mutex_owned(&mpt->mpt_lock));

	bzero(mpt->mpt_io_frame, LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	tm_req = mpt->mpt_io_frame;
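	/* The task mgmt reply is expected 128 bytes into the I/O frame. */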
	tm_reply = mpt->mpt_io_frame + 128;
	pd_ld_flags = (uint64_t *)tm_reply;

	tm_req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_req->TaskType = type;
	tm_req->TaskMID = smid;
	tm_req->DevHandle = tgt->tgt_dev_id;

	/*
	 * The uint64_t following the MPI2 task management request, overlaying
	 * the reply frame, contains two flags indicating whether the target
	 * is a LD or PD: bit 0 is set for a LD, bit 1 for a PD.
	 */
	if (tgt->tgt_pd_info == NULL)
		*pd_ld_flags = 1 << 0;
	else
		*pd_ld_flags = 1 << 1;

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)tm_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt,
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY);

	lmrc_send_atomic_request(lmrc, req_desc);

	/* Poll for completion if we're called while the system is panicking. */
	if (ddi_in_panic()) {
		ret = lmrc_poll_for_reply(lmrc, mpt);
	} else {
		clock_t timeout = drv_usectohz(LMRC_RESET_WAIT_TIME * MICROSEC);

		timeout += ddi_get_lbolt();
		do {
			ret = cv_timedwait(&mpt->mpt_cv, &mpt->mpt_lock,
			    timeout);
		} while (mpt->mpt_complete == B_FALSE && ret != -1);
	}

	atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	lmrc_put_mpt(mpt);
	rw_exit(&tgt->tgt_lock);

	if (ret >= 0)
		return (1);
	else
		return (-1);
}

/*
 * lmrc_abort_mpt
 *
 * Abort an MPT command by sending a TASK MGMT ABORT TASK command.
 */
int
lmrc_abort_mpt(lmrc_t *lmrc, lmrc_tgt_t *tgt, lmrc_mpt_cmd_t *mpt)
{
	ASSERT(mutex_owned(&tgt->tgt_mpt_active_lock));
	ASSERT(mutex_owned(&mpt->mpt_lock));

	return (lmrc_task_mgmt(lmrc, tgt, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    mpt->mpt_smid));
}

/*
 * lmrc_tran_abort
 *
 * Send a SCSI TASK MGMT request to abort a packet.
 */
static int
lmrc_tran_abort(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	/*
	 * If no pkt was given, abort all outstanding pkts for this target.
	 */
	if (pkt == NULL) {
		mutex_enter(&tgt->tgt_mpt_active_lock);
		for (mpt = lmrc_tgt_first_active_mpt(tgt);
		    mpt != NULL;
		    mpt = lmrc_tgt_next_active_mpt(tgt, mpt)) {
			ASSERT(mutex_owned(&mpt->mpt_lock));
			if (mpt->mpt_complete)
				continue;
			if (mpt->mpt_pkt == NULL)
				continue;

			if (lmrc_abort_mpt(lmrc, tgt, mpt) > 0)
				ret = 1;
		}
		mutex_exit(&tgt->tgt_mpt_active_lock);

		return (ret);
	}

	cmd = pkt->pkt_ha_private;

	VERIFY(cmd != NULL);
	VERIFY(cmd->sc_tgt == tgt);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);

	/* lmrc_abort_mpt() asserts that the MPT active lock is held, too. */
	mutex_enter(&tgt->tgt_mpt_active_lock);
	mutex_enter(&mpt->mpt_lock);
	ret = lmrc_abort_mpt(lmrc, tgt, mpt);
	mutex_exit(&mpt->mpt_lock);
	mutex_exit(&tgt->tgt_mpt_active_lock);

	if (ret == -1) {
		dev_err(lmrc->l_dip, CE_WARN, "!abort task timed out, "
		    "tgt %d", tgt->tgt_dev_id);
		return (0);
	}

	return (ret);
}

/*
 * lmrc_tran_reset
 *
 * Reset a target. There is no real support for RESET_LUN or RESET_ALL:
 * both are treated like a target reset, and RESET_ALL is always reported
 * as successful (see below).
 */
static int
lmrc_tran_reset(struct scsi_address *sa, int level)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	switch (level) {
	case RESET_ALL:
	case RESET_LUN:
	case RESET_TARGET:
		rw_enter(&tgt->tgt_lock, RW_READER);
		ret = lmrc_task_mgmt(lmrc, tgt,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0);
		rw_exit(&tgt->tgt_lock);

		if (ret == -1) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!target reset timed out, tgt %d",
			    tgt->tgt_dev_id);
			return (0);
		}

		break;
	}

	/*
	 * Fake a successful return in the case of RESET_ALL for the benefit of
	 * being able to save kernel core dumps. sddump() wants to reset the
	 * device and errors out if that fails, even if that happens not
	 * because of an error but because of a reset not being supported.
	 */
	if (ret == 0 && level == RESET_ALL)
		ret = 1;

	return (ret);
}

/*
 * lmrc_tran_setup_pkt
 *
 * Set up an MPT command for a scsi_pkt, and initialize scsi_pkt members as
 * needed:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the sense DMA memory allocated for the command
 * - pkt_scblen will be set to the size of the sense DMA memory
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, adjusted to account for the space
 *   needed for the ARQ pkt. Note there is no SenseBufferHighAddress. A sketch
 *   of the resulting layout follows below.
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect
 *
 * The procedure is the same irrespective of whether the command is sent to a
 * physical device or RAID volume.
 */
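/*
 * Illustrative sketch of the sense buffer layout set up below (derived from
 * the code, not from any official documentation):
 *
 *   pkt_scbp ---------------> +--------------------------+  mpt_sense_dma
 *                             | struct scsi_arq_status   |  address
 *                             |     ...                  |
 *   SenseBufferLowAddress --> |     sts_sensedata        |
 *                             |     ...                  |
 *                             +--------------------------+
 *
 *   pkt_scblen = size of the whole sense DMA memory
 *   SenseBufferLength = pkt_scblen -
 *       offsetof(struct scsi_arq_status, sts_sensedata)
 */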
static int
lmrc_tran_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t arg)
{
	struct scsi_address *sa;
	struct scsi_device *sd;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	Mpi25SCSIIORequest_t *io_req;
	lmrc_raidctx_g35_t *rc;

	if (pkt->pkt_cdblen > sizeof (io_req->CDB.CDB32))
		return (-1);

	sa = &pkt->pkt_address;
	VERIFY(sa != NULL);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	rw_enter(&tgt->tgt_lock, RW_READER);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = lmrc_get_mpt(lmrc);
	ASSERT(mutex_owned(&mpt->mpt_lock));

	io_req = mpt->mpt_io_frame;

	pkt->pkt_cdbp = io_req->CDB.CDB32;

	/* Just the CDB length now, but other flags may be set later. */
	io_req->IoFlags = pkt->pkt_cdblen;

	/*
	 * Set up sense buffer. The DMA memory holds the whole ARQ structure,
	 * so point SenseBufferLowAddress to sts_sensedata and reduce the
	 * length accordingly.
	 */
	pkt->pkt_scbp = mpt->mpt_sense;
	pkt->pkt_scblen = lmrc_dma_get_size(&mpt->mpt_sense_dma);

	lmrc_dma_set_addr32(&mpt->mpt_sense_dma,
	    &io_req->SenseBufferLowAddress);
	io_req->SenseBufferLowAddress +=
	    offsetof(struct scsi_arq_status, sts_sensedata);
	io_req->SenseBufferLength = pkt->pkt_scblen -
	    offsetof(struct scsi_arq_status, sts_sensedata);

	rc = &io_req->VendorRegion;
	rc->rc_timeout = pkt->pkt_time;

	cmd->sc_mpt = mpt;
	cmd->sc_tgt = tgt;
	mpt->mpt_pkt = pkt;
	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (0);
}

/*
 * lmrc_tran_teardown_pkt
 *
 * Return the MPT command to the free list. It'll be cleared later before
 * it is reused.
 */
static void
lmrc_tran_teardown_pkt(struct scsi_pkt *pkt)
{
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = cmd->sc_mpt;
	ASSERT(mpt != NULL);

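	/* Note: lmrc_put_mpt() is expected to release mpt_lock for us. */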
	mutex_enter(&mpt->mpt_lock);
	lmrc_put_mpt(mpt);
}

/*
 * lmrc_hba_attach
 *
 * Set up the HBA functions of lmrc. This is a SAS controller and uses complex
 * addressing for targets, presenting physical devices (PDs) and RAID volumes
 * (LDs) as separate iports.
 */
int
lmrc_hba_attach(lmrc_t *lmrc)
{
	scsi_hba_tran_t	*tran;
	ddi_dma_attr_t tran_attr = lmrc->l_dma_attr_32;

	tran = scsi_hba_tran_alloc(lmrc->l_dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		dev_err(lmrc->l_dip, CE_WARN, "!scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = lmrc;

	tran->tran_tgt_init = lmrc_tran_tgt_init;
	tran->tran_tgt_free = lmrc_tran_tgt_free;

	tran->tran_tgt_probe = scsi_hba_probe;

	tran->tran_start = lmrc_tran_start;
	tran->tran_abort = lmrc_tran_abort;
	tran->tran_reset = lmrc_tran_reset;

	tran->tran_getcap = lmrc_getcap;
	tran->tran_setcap = lmrc_setcap;

	tran->tran_setup_pkt = lmrc_tran_setup_pkt;
	tran->tran_teardown_pkt = lmrc_tran_teardown_pkt;
	tran->tran_hba_len = sizeof (lmrc_scsa_cmd_t);
	tran->tran_interconnect_type = INTERCONNECT_SAS;

	if (lmrc_relaxed_ordering)
		tran_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
	tran_attr.dma_attr_sgllen = lmrc->l_max_num_sge;

	if (scsi_hba_attach_setup(lmrc->l_dip, &tran_attr, tran,
	    SCSI_HBA_HBA | SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS)
		goto fail;

	lmrc->l_hba_tran = tran;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_RAID) !=
	    DDI_SUCCESS)
		goto fail;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_PHYS) !=
	    DDI_SUCCESS)
		goto fail;

	return (DDI_SUCCESS);

fail:
	dev_err(lmrc->l_dip, CE_WARN,
	    "!could not attach to SCSA framework");
	lmrc_hba_detach(lmrc);

	return (DDI_FAILURE);
}

void
lmrc_hba_detach(lmrc_t *lmrc)
{
	if (lmrc->l_hba_tran == NULL)
		return;

	(void) scsi_hba_detach(lmrc->l_dip);
	scsi_hba_tran_free(lmrc->l_hba_tran);
	lmrc->l_hba_tran = NULL;
}
780