/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Racktop Systems, Inc.
 */

/*
 * This file implements the RAID iport and tgtmap of lmrc.
 *
 * When the RAID iport is attached, a FULLSET tgtmap is created for RAID
 * devices (LDs). This includes not only RAID volumes, as one would expect,
 * but also physical disks on some controllers in JBOD mode.
 *
 * During attach or as a result of an async event received from the hardware,
 * we'll get the LD list from the HBA and populate the tgtmap with what we have
 * found. For each LD we'll try to get the SAS WWN by sending an INQUIRY for
 * VPD 0x83, setting up a temporary struct scsi_device to be able to use the
 * normal SCSI I/O code path despite the device not being known to the system
 * at this point.
 *
 * If the device has a SAS WWN, this will be used as the device address.
 * Otherwise we'll use the internal target ID the HBA uses.
 *
 * The target activate and deactivate callbacks for RAID devices are kept
 * really simple, just calling the common lmrc_tgt init/clear functions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>

#include "lmrc.h"
#include "lmrc_reg.h"
#include "lmrc_raid.h"

static int lmrc_get_raidmap(lmrc_t *, lmrc_fw_raid_map_t **);
static int lmrc_sync_raidmap(lmrc_t *);
static void lmrc_sync_raidmap_again(lmrc_t *, lmrc_mfi_cmd_t *);
static void lmrc_complete_sync_raidmap(lmrc_t *, lmrc_mfi_cmd_t *);
static int lmrc_validate_raidmap(lmrc_t *, lmrc_fw_raid_map_t *);

static void lmrc_raid_tgt_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
    void **);
static boolean_t lmrc_raid_tgt_deactivate_cb(void *, char *,
    scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static struct buf *lmrc_raid_send_inquiry(lmrc_t *, lmrc_tgt_t *, uint8_t,
    uint8_t);
static uint64_t lmrc_raid_get_wwn(lmrc_t *, uint8_t);
static int lmrc_raid_update_tgtmap(lmrc_t *, lmrc_ld_tgtid_list_t *);


/*
 * lmrc_get_raidmap
 *
 * Get the RAID map from firmware. Return a minimally sized copy.
 */
static int
lmrc_get_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t **raidmap)
{
	lmrc_mfi_cmd_t *mfi;
	lmrc_fw_raid_map_t *rm;
	int ret;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, LMRC_DCMD_LD_MAP_GET_INFO,
	    lmrc->l_max_map_sz, 4);

	if (mfi == NULL)
		return (DDI_FAILURE);

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
	    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORKERNEL);

	rm = mfi->mfi_data_dma.ld_buf;
	if (rm->rm_raidmap_sz > lmrc->l_max_map_sz) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!FW reports too large a RAID map size: %d",
		    rm->rm_raidmap_sz);
		ret = DDI_FAILURE;
		goto out;
	}

	*raidmap = kmem_zalloc(rm->rm_raidmap_sz, KM_SLEEP);
	bcopy(rm, *raidmap, rm->rm_raidmap_sz);

out:
	lmrc_put_dcmd(lmrc, mfi);

	return (ret);
}

/*
 * lmrc_sync_raidmap
 *
 * Generate a LD target map from the RAID map and send that to the firmware.
 * The command will complete when firmware detects a change, returning a new
 * RAID map in the DMA memory. The size of the RAID map isn't expected to
 * change, so that's what's used as the size for the DMA memory.
 *
 * mbox byte values:
 * [0]:		number of LDs
 * [1]:		PEND_FLAG, delay completion until a config change is pending
 */
static int
lmrc_sync_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_mfi_cmd_t *mfi;
	lmrc_mfi_dcmd_payload_t *dcmd;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_WRITE,
	    LMRC_DCMD_LD_MAP_GET_INFO, rm->rm_raidmap_sz, 4);

	if (mfi == NULL) {
		rw_exit(&lmrc->l_raidmap_lock);
		return (DDI_FAILURE);
	}

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	dcmd->md_mbox_8[1] = LMRC_DCMD_MBOX_PEND_FLAG;
	rw_exit(&lmrc->l_raidmap_lock);

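	/*
	 * Issue the command; due to the PEND_FLAG mbox byte set above, FW
	 * will complete it only once it detects a configuration change.
	 */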
	mutex_enter(&mfi->mfi_lock);
	lmrc_sync_raidmap_again(lmrc, mfi);
	mutex_exit(&mfi->mfi_lock);

	return (DDI_SUCCESS);
}

/*
 * lmrc_sync_raidmap_again
 *
 * Called by lmrc_sync_raidmap() and lmrc_complete_sync_raidmap() to avoid
 * deallocating and reallocating DMA memory and the MFI command in the latter,
 * while executing in interrupt context.
 *
 * This does the actual work of building the LD target map for FW and
 * issuing the command, but it does no sleeping allocations and it cannot fail.
 */
static void
lmrc_sync_raidmap_again(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	lmrc_ld_tgt_t *ld_sync = dma->ld_buf;
	lmrc_mfi_dcmd_payload_t *dcmd = &mfi->mfi_frame->mf_dcmd;
	uint32_t ld;

	bzero(dma->ld_buf, dma->ld_len);

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	for (ld = 0; ld < rm->rm_ld_count; ld++) {
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld, rm);

		ASSERT(lr != NULL);

		ld_sync[ld].lt_tgtid = lr->lr_target_id;
		ld_sync[ld].lt_seqnum = lr->lr_seq_num;
	}
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	rw_exit(&lmrc->l_raidmap_lock);

	ASSERT(mutex_owned(&mfi->mfi_lock));
	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_sync_raidmap);
}

/*
 * lmrc_complete_sync_raidmap
 *
 * The firmware completed our request to sync the LD target map, indicating
 * that the configuration has changed. There's a new RAID map in the DMA
 * memory.
 */
static void
lmrc_complete_sync_raidmap(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	lmrc_fw_raid_map_t *rm = dma->ld_buf;

	ASSERT(mutex_owned(&mfi->mfi_lock));

	if (hdr->mh_cmd_status != MFI_STAT_OK) {
		/* Was the command aborted? */
		if (hdr->mh_cmd_status == MFI_STAT_NOT_FOUND)
			return;

		dev_err(lmrc->l_dip, CE_WARN,
		    "!LD target map sync failed, status = %d",
		    hdr->mh_cmd_status);
		taskq_dispatch_ent(lmrc->l_taskq, (task_func_t *)lmrc_put_mfi,
		    mfi, TQ_NOSLEEP, &mfi->mfi_tqent);
		return;
	}

	if (lmrc_validate_raidmap(lmrc, rm) != DDI_SUCCESS)
		return;

	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	VERIFY3U(lmrc->l_raidmap->rm_raidmap_sz, ==, dma->ld_len);
	bcopy(rm, lmrc->l_raidmap, lmrc->l_raidmap->rm_raidmap_sz);
	rw_exit(&lmrc->l_raidmap_lock);
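	/*
	 * Re-arm: send the updated LD target map back to FW so that it will
	 * notify us of the next configuration change as well.
	 */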
	lmrc_sync_raidmap_again(lmrc, mfi);
}

/*
 * lmrc_validate_raidmap
 *
 * Basic sanity checks of a RAID map as returned by the firmware.
 */
static int
lmrc_validate_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t *raidmap)
{
	lmrc_raid_map_desc_t *desc;
	int i;

	/* Do a basic sanity check of the descriptor table offset and sizes. */
	if (raidmap->rm_desc_table_off > raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
	    raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_nelem != LMRC_RAID_MAP_DESC_TYPES_COUNT)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_sz !=
	    raidmap->rm_desc_table_nelem * sizeof (lmrc_raid_map_desc_t))
		return (DDI_FAILURE);

	desc = (lmrc_raid_map_desc_t *)
	    ((uint8_t *)raidmap + raidmap->rm_desc_table_off);

	/* Fill in descriptor pointers */
	for (i = 0; i < raidmap->rm_desc_table_nelem; i++) {
		/* Do a basic sanity check of the descriptor itself. */
		if (desc[i].rmd_type >= LMRC_RAID_MAP_DESC_TYPES_COUNT)
			return (DDI_FAILURE);
		if (desc[i].rmd_off + raidmap->rm_desc_table_off +
		    raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);
		if (desc[i].rmd_off + desc[i].rmd_bufsz +
		    raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);

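		/*
		 * The descriptor data buffers are laid out after the end of
		 * the descriptor table; rmd_off is relative to that point.
		 */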
		raidmap->rm_desc_ptrs[desc[i].rmd_type] = (void *)
		    ((uint8_t *)desc + raidmap->rm_desc_table_sz +
		    desc[i].rmd_off);
	}

	return (DDI_SUCCESS);
}

/*
 * lmrc_setup_raidmap
 *
 * Get the current RAID map from the firmware. If it validates, replace the
 * copy in the soft state and send a LD target map to the firmware.
 */
int
lmrc_setup_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *raidmap;
	int ret;

	ret = lmrc_get_raidmap(lmrc, &raidmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = lmrc_validate_raidmap(lmrc, raidmap);
	if (ret != DDI_SUCCESS) {
		kmem_free(raidmap, raidmap->rm_raidmap_sz);
		return (ret);
	}

	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	lmrc_free_raidmap(lmrc);
	lmrc->l_raidmap = raidmap;
	rw_exit(&lmrc->l_raidmap_lock);

	ret = lmrc_sync_raidmap(lmrc);

	return (ret);
}

/*
 * lmrc_free_raidmap
 *
 * Free the buffer used to hold the RAID map.
 */
void
lmrc_free_raidmap(lmrc_t *lmrc)
{
	if (lmrc->l_raidmap != NULL) {
		kmem_free(lmrc->l_raidmap, lmrc->l_raidmap->rm_raidmap_sz);
		lmrc->l_raidmap = NULL;
	}
}

/*
 * lmrc_ld_tm_capable
 *
 * Check whether the LD behind a given target ID supports task management.
 */
boolean_t
lmrc_ld_tm_capable(lmrc_t *lmrc, uint16_t tgtid)
{
	boolean_t tm_capable = B_FALSE;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	if (lmrc->l_raidmap != NULL) {
		uint16_t ld_id = lmrc_ld_id_get(tgtid, lmrc->l_raidmap);
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld_id, lmrc->l_raidmap);

		if (lr->lr_cap.lc_tm_cap != 0)
			tm_capable = B_TRUE;
	}
	rw_exit(&lmrc->l_raidmap_lock);

	return (tm_capable);
}


/*
 * lmrc_raid_tgt_activate_cb
 *
 * Set up a tgt structure for a newly discovered LD.
 */
static void
lmrc_raid_tgt_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t type, void **tgt_privp)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = *tgt_privp;
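	/* The target ID is the index of the tgt structure in l_targets. */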
	uint16_t tgtid = tgt - lmrc->l_targets;

	VERIFY(lmrc == tgt->tgt_lmrc);

	VERIFY3U(tgtid, <, LMRC_MAX_LD);

	lmrc_tgt_init(tgt, tgtid, tgt_addr, NULL);
}

/*
 * lmrc_raid_tgt_deactivate_cb
 *
 * Tear down the tgt structure of a LD that is no longer present.
 */
static boolean_t
lmrc_raid_tgt_deactivate_cb(void *tgtmap_priv, char *tgtaddr,
    scsi_tgtmap_tgt_type_t type, void *tgt_priv, scsi_tgtmap_deact_rsn_t deact)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = tgt_priv;

	VERIFY(lmrc == tgt->tgt_lmrc);

	lmrc_tgt_clear(tgt);

	return (B_FALSE);
}

/*
 * lmrc_raid_send_inquiry
 *
 * Fake a scsi_device and scsi_address, use the SCSA functions to allocate
 * a buf and a scsi_pkt, and issue an INQUIRY command to the target. Return
 * the buf on success, NULL otherwise.
 */
static struct buf *
lmrc_raid_send_inquiry(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t evpd,
    uint8_t page_code)
{
	struct buf *inq_bp = NULL;
	struct scsi_pkt *inq_pkt = NULL;
	const size_t len = 0xf0; /* max INQUIRY length */
	struct scsi_device sd;
	int ret;

	/*
	 * Fake a scsi_device and scsi_address so we can use the scsi functions,
	 * which in turn call our tran_setup_pkt and tran_start functions.
	 */
	bzero(&sd, sizeof (sd));
	sd.sd_address.a_hba_tran = ddi_get_driver_private(lmrc->l_raid_dip);
	sd.sd_address.a.a_sd = &sd;
	scsi_device_hba_private_set(&sd, tgt);

	/*
	 * Get a buffer for INQUIRY.
	 */
	inq_bp = scsi_alloc_consistent_buf(&sd.sd_address, NULL,
	    len, B_READ, SLEEP_FUNC, NULL);

	if (inq_bp == NULL)
		goto out;

	inq_pkt = scsi_init_pkt(&sd.sd_address, NULL, inq_bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, SLEEP_FUNC,
	    NULL);

	if (inq_pkt == NULL)
		goto fail;

	(void) scsi_setup_cdb((union scsi_cdb *)inq_pkt->pkt_cdbp,
	    SCMD_INQUIRY, 0, len, 0);
	inq_pkt->pkt_cdbp[1] = evpd;
	inq_pkt->pkt_cdbp[2] = page_code;

	ret = scsi_poll(inq_pkt);

	scsi_destroy_pkt(inq_pkt);

	if (ret != 0) {
fail:
		scsi_free_consistent_buf(inq_bp);
		inq_bp = NULL;
	}

out:
	return (inq_bp);
}

/*
 * lmrc_raid_get_wwn
 *
 * LDs may have a WWN, but the hardware doesn't just tell us about it.
 * Send an INQUIRY to the target and get VPD page 0x83. If the target
 * does have a WWN, return it.
 */
static uint64_t
lmrc_raid_get_wwn(lmrc_t *lmrc, uint8_t tgtid)
{
	lmrc_tgt_t *tgt = &lmrc->l_targets[tgtid];
	char *guid = NULL;
	struct buf *inq_bp = NULL, *inq83_bp = NULL;
	uint64_t wwn = 0;
	ddi_devid_t devid;
	int ret;

	/*
	 * Make sure we have the target ID set in the target structure.
	 */
	rw_enter(&tgt->tgt_lock, RW_WRITER);
	VERIFY3U(tgt->tgt_lmrc, ==, lmrc);
	if (tgt->tgt_dev_id == LMRC_DEVHDL_INVALID)
		tgt->tgt_dev_id = tgtid;
	else
		VERIFY3U(tgt->tgt_dev_id, ==, tgtid);
	rw_exit(&tgt->tgt_lock);

	/* Get basic INQUIRY data from device. */
	inq_bp = lmrc_raid_send_inquiry(lmrc, tgt, 0, 0);
	if (inq_bp == NULL)
		goto fail;

	/* Get VPD 83 from INQUIRY. */
	inq83_bp = lmrc_raid_send_inquiry(lmrc, tgt, 1, 0x83);
	if (inq83_bp == NULL)
		goto fail;

	/* Try to turn the VPD83 data into a devid. */
	ret = ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION1,
	    NULL, (uchar_t *)inq_bp->b_un.b_addr, sizeof (struct scsi_inquiry),
	    NULL, 0, (uchar_t *)inq83_bp->b_un.b_addr, inq83_bp->b_bcount,
	    &devid);
	if (ret != DDI_SUCCESS)
		goto fail;

	/* Extract the GUID from the devid. */
	guid = ddi_devid_to_guid(devid);
	if (guid == NULL)
		goto fail;

	/* Convert the GUID to a WWN. */
	(void) scsi_wwnstr_to_wwn(guid, &wwn);

	ddi_devid_free_guid(guid);

fail:
	if (inq_bp != NULL)
		scsi_free_consistent_buf(inq_bp);
	if (inq83_bp != NULL)
		scsi_free_consistent_buf(inq83_bp);

	return (wwn);
}

/*
 * lmrc_raid_update_tgtmap
 *
 * Feed the LD target ID list into the target map. Try to get a WWN for
 * each LD.
 */
static int
lmrc_raid_update_tgtmap(lmrc_t *lmrc, lmrc_ld_tgtid_list_t *ld_list)
{
	int ret;
	int i;

	if (ld_list->ltl_count > lmrc->l_fw_supported_vd_count)
		return (DDI_FAILURE);

	ret = scsi_hba_tgtmap_set_begin(lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	for (i = 0; i < ld_list->ltl_count; i++) {
		uint8_t tgtid = ld_list->ltl_tgtid[i];
		char name[SCSI_WWN_BUFLEN];
		uint64_t wwn;

		if (tgtid > lmrc->l_fw_supported_vd_count) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: invalid LD tgt id %d", __func__, tgtid);
			goto fail;
		}

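		/*
		 * Use the WWN as the target address if the LD has one;
		 * fall back to the plain target ID otherwise.
		 */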
		wwn = lmrc_raid_get_wwn(lmrc, tgtid);
		if (wwn != 0)
			(void) scsi_wwn_to_wwnstr(wwn, 0, name);
		else
			(void) snprintf(name, sizeof (name), "%d", tgtid);

		ret = scsi_hba_tgtmap_set_add(lmrc->l_raid_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, name, &lmrc->l_targets[tgtid]);

		if (ret != DDI_SUCCESS)
			goto fail;
	}

	return (scsi_hba_tgtmap_set_end(lmrc->l_raid_tgtmap, 0));

fail:
	(void) scsi_hba_tgtmap_set_flush(lmrc->l_raid_tgtmap);
	return (DDI_FAILURE);
}

/*
 * lmrc_get_ld_list
 *
 * Query the controller for a list of currently known LDs. Use the information
 * to update the target map.
 */
int
lmrc_get_ld_list(lmrc_t *lmrc)
{
	lmrc_mfi_dcmd_payload_t *dcmd;
	lmrc_mfi_cmd_t *mfi;
	int ret;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, LMRC_DCMD_LD_LIST_QUERY,
	    sizeof (lmrc_ld_tgtid_list_t) + lmrc->l_fw_supported_vd_count, 1);

	if (mfi == NULL)
		return (DDI_FAILURE);

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = LMRC_LD_QUERY_TYPE_EXPOSED_TO_HOST;

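	/* On controllers with 256 VD support, ask FW for the extended list. */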
	if (lmrc->l_max_256_vd_support)
		dcmd->md_mbox_8[2] = 1;

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	ret = lmrc_raid_update_tgtmap(lmrc, mfi->mfi_data_dma.ld_buf);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}

/*
 * lmrc_raid_aen_handler
 *
 * Handle AENs with locale code LMRC_EVT_LOCALE_LD. If the LD configuration
 * changed, update the LD list and target map.
 */
int
lmrc_raid_aen_handler(lmrc_t *lmrc, lmrc_evt_t *evt)
{
	int ret = DDI_SUCCESS;

	switch (evt->evt_code) {
	case LMRC_EVT_LD_CC_STARTED:
	case LMRC_EVT_LD_CC_PROGRESS:
	case LMRC_EVT_LD_CC_COMPLETE:
		/*
		 * Consistency Check. I/O is possible during consistency check,
		 * so there's no need to do anything.
		 */
		break;

	case LMRC_EVT_LD_FAST_INIT_STARTED:
	case LMRC_EVT_LD_FULL_INIT_STARTED:
		/*
		 * A LD initialization process has been started.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case LMRC_EVT_LD_BG_INIT_PROGRESS:
	case LMRC_EVT_LD_INIT_PROGRESS:
		/*
		 * FULL INIT reports these for every percent of completion.
		 * Ignore.
		 */
		break;

	case LMRC_EVT_LD_INIT_ABORTED:
	case LMRC_EVT_LD_INIT_COMPLETE:
		/*
		 * The LD initialization has ended, one way or another.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case LMRC_EVT_LD_BBT_CLEARED:
		/*
		 * The Bad Block Table for the LD has been cleared. This usually
		 * follows an INIT_COMPLETE, but may occur in other situations.
		 * Ignore.
		 */
		break;

	case LMRC_EVT_LD_PROP_CHANGED:
		/*
		 * Happens when LD props are changed, such as setting the
		 * "hidden" property. There's little we can do here as we
		 * don't know which property changed which way. In any case,
		 * this is usually followed by a HOST BUS SCAN REQD which
		 * will handle any changes.
		 */
		break;

	case LMRC_EVT_LD_OFFLINE:
		/*
		 * Not sure when this happens, but since the LD is offline we
		 * should just remove it from the target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case LMRC_EVT_LD_DELETED:
		/*
		 * A LD was deleted, remove it from the target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case LMRC_EVT_LD_OPTIMAL:
		/*
		 * There might be several cases when this event occurs,
		 * in particular when a LD is created. In that case it's the
		 * first of several events, so we can ignore it.
		 */
		break;

	case LMRC_EVT_LD_CREATED:
		/*
		 * This is the 2nd event generated when a LD is created, and
		 * it's the one FreeBSD and Linux act on. Add the LD to the
		 * target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case LMRC_EVT_LD_AVAILABLE:
		/*
		 * This event happens last when a LD is created, but there may
		 * be other scenarios where this occurs. Ignore it for now.
		 */
		break;

	case LMRC_EVT_LD_STATE_CHANGE:
		/*
		 * Not sure when this happens, but updating the LD list is
		 * probably a good idea.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	default:
		ret = DDI_FAILURE;
	}

	return (ret);
}

int
lmrc_raid_attach(dev_info_t *dip)
{
	scsi_hba_tran_t *tran = ddi_get_driver_private(dip);
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));
	int ret;

	VERIFY(tran != NULL);
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (DDI_FAILURE);

	tran->tran_hba_private = lmrc;
	lmrc->l_raid_dip = dip;

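	/*
	 * Create the FULLSET tgtmap for LDs. The activate/deactivate
	 * callbacks set up and tear down our lmrc_tgt_t structures.
	 */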
	ret = scsi_hba_tgtmap_create(dip, SCSI_TM_FULLSET, MICROSEC,
	    2 * MICROSEC, lmrc, lmrc_raid_tgt_activate_cb,
	    lmrc_raid_tgt_deactivate_cb, &lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = lmrc_setup_raidmap(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!RAID map setup failed.");
		return (DDI_FAILURE);
	}

	ret = lmrc_get_ld_list(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!Failed to get LD list.");
		return (ret);
	}

	return (DDI_SUCCESS);
}

int
lmrc_raid_detach(dev_info_t *dip)
{
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));

	VERIFY(lmrc != NULL);

	if (lmrc->l_raid_tgtmap != NULL) {
		scsi_hba_tgtmap_destroy(lmrc->l_raid_tgtmap);
		lmrc->l_raid_tgtmap = NULL;
	}

	lmrc->l_raid_dip = NULL;

	return (DDI_SUCCESS);
}
761