xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/lmrc/lmrc.c (revision b210e77709da8e42dfe621e10ccf4be504206058)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2023 Racktop Systems, Inc.
14  */
15 
16 /*
17  * This file implements the interfaces for communicating with the MegaRAID HBA.
18  * There are three basic interfaces:
19  * - the device registers, which provide basic information about the controller
20  *   hardware and the features it supports, as well as control registers used
21  *   during sending and reception of I/O frames
22  * - Fusion-MPT v2.5, perhaps later, which defines the format of the I/O frames
23  *   used for communicating with the HBA and virtual and physical devices that
24  *   are attached to it
25  * - MFI, the MegaRAID Firmware Interface, which is sent and received as MPT
26  *   payloads to control and communicate with the RAID controller.
27  */
28 
29 #include <sys/types.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/scsi/scsi.h>
33 
34 #include <sys/cpuvar.h>
35 
36 #include "lmrc.h"
37 #include "lmrc_reg.h"
38 #include "lmrc_raid.h"
39 #include "lmrc_phys.h"
40 
41 static uint32_t lmrc_read_reg(lmrc_t *, uint32_t);
42 static void lmrc_write_reg(lmrc_t *, uint32_t, uint32_t);
43 static int lmrc_transition_to_ready(lmrc_t *);
44 static void lmrc_process_mptmfi_passthru(lmrc_t *, lmrc_mpt_cmd_t *);
45 static void lmrc_build_mptmfi_passthru(lmrc_t *, lmrc_mfi_cmd_t *);
46 static int lmrc_poll_mfi(lmrc_t *, lmrc_mfi_cmd_t *, uint8_t);
47 static boolean_t lmrc_check_fw_fault(lmrc_t *);
48 static int lmrc_get_event_log_info(lmrc_t *, lmrc_evt_log_info_t *);
49 static void lmrc_aen_handler(void *);
50 static void lmrc_complete_aen(lmrc_t *, lmrc_mfi_cmd_t *);
51 static int lmrc_register_aen(lmrc_t *, uint32_t);
52 
53 /*
54  * Device register access functions.
55  *
56  * Due to the way ddi_get* and ddi_put* work, we'll need to calculate the
57  * absolute virtual address of the registers ourselves.
58  *
59  * For read accesses, apply an erratum workaround for Aero controllers. In some
60  * cases, reads of certain registers will intermittently return all zeros. As a
61  * workaround, retry the read up to three times until a non-zero value is read.
62  * Supposedly this is enough; every other driver I looked at does this.
63  */
64 static uint32_t
65 lmrc_read_reg_1(lmrc_t *lmrc, uint32_t reg)
66 {
67 	uint32_t *addr = (uint32_t *)((uintptr_t)lmrc->l_regmap + reg);
68 	return (ddi_get32(lmrc->l_reghandle, addr));
69 }
70 
71 static uint32_t
72 lmrc_read_reg(lmrc_t *lmrc, uint32_t reg)
73 {
74 	if (lmrc->l_class != LMRC_ACLASS_AERO)
75 		return (lmrc_read_reg_1(lmrc, reg));
76 
77 	/* Workaround for the hardware erratum in Aero controllers */
78 	for (uint_t i = 0; i < 3; i++) {
79 		uint32_t val = lmrc_read_reg_1(lmrc, reg);
80 
81 		if (val != 0)
82 			return (val);
83 	}
84 
85 	return (0);
86 }
87 
88 static void
89 lmrc_write_reg(lmrc_t *lmrc, uint32_t reg, uint32_t val)
90 {
91 	uint32_t *addr = (uint32_t *)((uintptr_t)lmrc->l_regmap + reg);
92 	ddi_put32(lmrc->l_reghandle, addr, val);
93 }
94 
95 static void
96 lmrc_write_reg64(lmrc_t *lmrc, uint32_t reg, uint64_t val)
97 {
98 	uint64_t *addr = (uint64_t *)((uintptr_t)lmrc->l_regmap + reg);
99 	ddi_put64(lmrc->l_reghandle, addr, val);
100 }
101 
102 /*
103  * Interrupt control
104  *
105  * The hardware supports 4 interrupt registers:
106  * - inbound interrupt status
107  * - inbound interrupt mask
108  * - outbound interrupt status
109  * - outbound interrupt mask
110  *
111  * The following code uses only the outbound interrupt registers; the function
112  * and use of the inbound interrupt registers are unknown.
113  */
114 void
115 lmrc_disable_intr(lmrc_t *lmrc)
116 {
117 	uint32_t mask = 0xFFFFFFFF;
118 
119 	lmrc_write_reg(lmrc, LMRC_OB_INTR_MASK, mask);
120 	/* Dummy read to force pci flush */
121 	(void) lmrc_read_reg(lmrc, LMRC_OB_INTR_MASK);
122 }
123 
124 void
125 lmrc_enable_intr(lmrc_t *lmrc)
126 {
127 	uint32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
128 
129 	lmrc_write_reg(lmrc, LMRC_OB_INTR_STATUS, ~0);
130 	(void) lmrc_read_reg(lmrc, LMRC_OB_INTR_STATUS);
131 
132 	lmrc_write_reg(lmrc, LMRC_OB_INTR_MASK, ~mask);
133 	(void) lmrc_read_reg(lmrc, LMRC_OB_INTR_MASK);
134 }
135 
136 uint_t
137 lmrc_intr_ack(lmrc_t *lmrc)
138 {
139 	uint32_t status;
140 
141 	status = lmrc_read_reg(lmrc, LMRC_OB_INTR_STATUS);
142 
143 	if ((status & MFI_FUSION_ENABLE_INTERRUPT_MASK) == 0)
144 		return (DDI_INTR_UNCLAIMED);
145 
146 	if (lmrc_check_acc_handle(lmrc->l_reghandle) != DDI_SUCCESS) {
147 		ddi_fm_service_impact(lmrc->l_dip, DDI_SERVICE_LOST);
148 		return (DDI_INTR_UNCLAIMED);
149 	}
150 
151 	return (DDI_INTR_CLAIMED);
152 }
153 
154 /*
155  * Fusion-MPT requests
156  *
157  * The controller expects to have access to a large chunk of DMA memory, into
158  * which the driver writes fixed-size I/O requests for the controller to
159  * process. To notify the hardware about a new request, a request descriptor
160  * including the SMID of the request is written to the queue port registers.
161  * This memory isn't really a queue, though, as it seems there are no
162  * constraints about ordering of the requests. All that matters is that there
163  * is a valid request at the address that corresponds with the SMID in the
164  * descriptor.
165  *
166  * If the hardware supports MPI 2.6 atomic request descriptors, which are a
167  * 32bit subset of the 64bit MPI 2.0/2.5 request descriptors, the descriptor is
168  * sent to the controller in a single 32bit write into a device register.
169  *
170  * For all other descriptor types, we'll employ a 64bit write to the queue
171  * registers, assuming that provides the required atomicity.
172  */
173 void
174 lmrc_send_atomic_request(lmrc_t *lmrc, lmrc_atomic_req_desc_t req_desc)
175 {
176 	if (lmrc->l_atomic_desc_support) {
177 		lmrc_write_reg(lmrc, LMRC_IB_SINGLE_QUEUE_PORT,
178 		    req_desc.rd_reg);
179 	} else {
180 		lmrc_req_desc_t rd;
181 
182 		bzero(&rd, sizeof (rd));
183 		rd.rd_atomic = req_desc;
184 
185 		lmrc_send_request(lmrc, rd);
186 	}
187 }
188 
189 void
190 lmrc_send_request(lmrc_t *lmrc, lmrc_req_desc_t req_desc)
191 {
192 	lmrc_write_reg64(lmrc, LMRC_IB_LO_QUEUE_PORT, req_desc.rd_reg);
193 }
194 
195 lmrc_atomic_req_desc_t
196 lmrc_build_atomic_request(lmrc_t *lmrc, lmrc_mpt_cmd_t *mpt, uint8_t flags)
197 {
198 	lmrc_atomic_req_desc_t req_desc;
199 
200 	VERIFY3U(mpt->mpt_smid, !=, 0);
201 
202 	/*
203 	 * Select the reply queue based on the CPU id to distribute reply load
204 	 * among queues.
205 	 */
206 	mpt->mpt_queue = CPU->cpu_id % lmrc->l_max_reply_queues;
207 
208 	bzero(&req_desc, sizeof (req_desc));
209 
210 	req_desc.rd_atomic.RequestFlags = flags;
211 	req_desc.rd_atomic.MSIxIndex = mpt->mpt_queue;
212 	req_desc.rd_atomic.SMID = mpt->mpt_smid;
213 
214 	return (req_desc);
215 }
216 
217 /*
218  * Reply Processing
219  *
220  * The controller will post replies to completed requests in the DMA memory
221  * provided for that purpose. This memory is divided into equally-sized chunks,
222  * each being a separate reply queue that is also associated with an interrupt
223  * vector. The replies are fixed size structures and will be written by the
224  * hardware in order of completion into the queue. For each queue, there is a
225  * register to tell the hardware which replies have been consumed by the driver.
226  *
227  * In response to an interrupt, the driver will walk the reply queue associated
228  * with the interrupt vector at the last known position and processes all
229  * completed replies. After a number of replies have been processed, or if no
230  * more replies are ready to be processed, the controller will be notified about
231  * the last reply index to be processed by writing the appropriate register.
232  */
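
/*
 * To illustrate the layout (the queue depth below is an example only): with a
 * reply queue depth of 1024, each queue occupies l_reply_alloc_sz =
 * 1024 * sizeof (Mpi2ReplyDescriptorsUnion_t) bytes of the reply DMA buffer,
 * queue n starts at ld_buf + n * l_reply_alloc_sz, and the per-queue read
 * position is kept in l_last_reply_idx[n], wrapping at l_reply_q_depth.
 */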
233 
234 /*
235  * lmrc_get_next_reply_desc
236  *
237  * Get the next unprocessed reply descriptor for a queue, or NULL if there is
238  * none.
239  */
240 static Mpi2ReplyDescriptorsUnion_t *
241 lmrc_get_next_reply_desc(lmrc_t *lmrc, int queue)
242 {
243 	Mpi2ReplyDescriptorsUnion_t *desc;
244 
245 	desc = lmrc->l_reply_dma.ld_buf;
246 
247 	desc += (queue * lmrc->l_reply_alloc_sz) / sizeof (*desc);
248 	desc += lmrc->l_last_reply_idx[queue];
249 
250 	VERIFY3S(ddi_dma_sync(lmrc->l_reply_dma.ld_hdl,
251 	    (void *)desc - lmrc->l_reply_dma.ld_buf, sizeof (*desc),
252 	    DDI_DMA_SYNC_FORKERNEL), ==, DDI_SUCCESS);
253 
254 	/*
255 	 * Check if this is an unused reply descriptor, indicating that
256 	 * we've reached the end of replies in this queue.
257 	 *
258 	 * Even if the descriptor is only "half unused" we can't use it.
259 	 */
260 	if (desc->Words.Low == MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK ||
261 	    desc->Words.High == MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK)
262 		return (NULL);
263 
264 	/* advance last reply index, wrap around if necessary */
265 	lmrc->l_last_reply_idx[queue]++;
266 	if (lmrc->l_last_reply_idx[queue] >= lmrc->l_reply_q_depth)
267 		lmrc->l_last_reply_idx[queue] = 0;
268 
269 	return (desc);
270 }
271 
272 /*
273  * lmrc_write_rphi
274  *
275  * Write the Reply Post Host Index register for queue.
276  */
277 static void
278 lmrc_write_rphi(lmrc_t *lmrc, uint32_t queue)
279 {
280 	int reg = 0;
281 	uint32_t val = (queue << 24) | lmrc->l_last_reply_idx[queue];
282 
283 	if (lmrc->l_intr_type != DDI_INTR_TYPE_MSIX)
284 		VERIFY3U(queue, ==, 0);
285 
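	/*
	 * With combined MSI-X, each supplemental host index register serves a
	 * group of 8 reply queues: reg selects the register, and masking val
	 * keeps only queue % 8 (in bits 24-26) alongside the reply index.
	 */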
286 	if (lmrc->l_msix_combined) {
287 		reg = queue / 8;
288 		val &= 0x07ffffff;
289 	}
290 
291 	lmrc_write_reg(lmrc, lmrc->l_rphi[reg], val);
292 }
293 
294 /*
295  * lmrc_process_mpt_pkt
296  *
297  * Process a reply to a MPT IO request. Update the scsi_pkt according to status,
298  * ex_status, and data_len, setting up the ARQ pkt if necessary.
299  */
300 static void
301 lmrc_process_mpt_pkt(lmrc_t *lmrc, struct scsi_pkt *pkt, uint8_t status,
302     uint8_t ex_status, uint32_t data_len)
303 {
304 	pkt->pkt_statistics = 0;
305 	pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
306 	    STATE_XFERRED_DATA | STATE_GOT_STATUS;
307 
308 	pkt->pkt_resid = pkt->pkt_dma_len - data_len;
309 
310 	switch (status) {
311 	case MFI_STAT_OK:
312 	case MFI_STAT_LD_CC_IN_PROGRESS:
313 	case MFI_STAT_LD_RECON_IN_PROGRESS:
314 		pkt->pkt_reason = CMD_CMPLT;
315 		pkt->pkt_scbp[0] = STATUS_GOOD;
316 		break;
317 
318 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
319 	case MFI_STAT_LD_LBA_OUT_OF_RANGE: {
320 		struct scsi_arq_status *arq =
321 		    (struct scsi_arq_status *)pkt->pkt_scbp;
322 
323 		pkt->pkt_reason = CMD_CMPLT;
324 		arq->sts_status.sts_chk = 1;
325 
326 		pkt->pkt_state |= STATE_ARQ_DONE;
327 		arq->sts_rqpkt_reason = CMD_CMPLT;
328 		arq->sts_rqpkt_resid = 0;
329 		arq->sts_rqpkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
330 		    STATE_SENT_CMD | STATE_XFERRED_DATA;
331 		*(uint8_t *)&arq->sts_rqpkt_status = STATUS_GOOD;
332 		break;
333 	}
334 	case MFI_STAT_LD_OFFLINE:
335 	case MFI_STAT_DEVICE_NOT_FOUND:
336 		pkt->pkt_reason = CMD_DEV_GONE;
337 		pkt->pkt_statistics = STAT_DISCON;
338 		break;
339 
340 	default:
341 		dev_err(lmrc->l_dip, CE_PANIC, "!command failed, status = %x, "
342 		    "ex_status = %x, cdb[0] = %x", status, ex_status,
343 		    pkt->pkt_cdbp[0]);
344 		pkt->pkt_reason = CMD_TRAN_ERR;
345 		break;
346 	}
347 }
348 
349 /*
350  * lmrc_poll_for_reply
351  *
352  * During a panic we'll have to resort to polled I/O to write core dumps.
353  * Repeatedly check the reply queue for a new reply associated with the
354  * given request descriptor and complete it, or return an error if we get
355  * no reply within a reasonable time.
356  */
357 int
358 lmrc_poll_for_reply(lmrc_t *lmrc, lmrc_mpt_cmd_t *mpt)
359 {
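	/*
	 * max_wait below counts 100us polling intervals, which adds up to
	 * LMRC_IO_TIMEOUT seconds overall.
	 */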
360 	clock_t max_wait = LMRC_IO_TIMEOUT * MILLISEC * 10;
361 	Mpi25SCSIIORequest_t *io_req = mpt->mpt_io_frame;
362 	Mpi2ReplyDescriptorsUnion_t *desc;
363 	uint16_t desc_smid;
364 
365 	VERIFY(ddi_in_panic());
366 
367 	/*
368 	 * Walk the reply queue. Discard entries which we aren't
369 	 * looking for.
370 	 */
371 	do {
372 		desc = lmrc_get_next_reply_desc(lmrc, mpt->mpt_queue);
373 		if (desc == NULL) {
374 			if (max_wait == 0)
375 				return (TRAN_FATAL_ERROR);
376 
377 			drv_usecwait(100);
378 			max_wait--;
379 			continue;
380 		}
381 
382 		desc_smid = desc->SCSIIOSuccess.SMID;
383 
384 		/* reset descriptor */
385 		desc->Words.Low = MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK;
386 		desc->Words.High = MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK;
387 
388 		lmrc_write_rphi(lmrc, mpt->mpt_queue);
389 	} while (desc == NULL || desc_smid != mpt->mpt_smid);
390 
391 	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
392 	    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
393 	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORKERNEL),
394 	    ==, DDI_SUCCESS);
395 
396 	/* If this is I/O, process it. */
397 	if (io_req->Function == LMRC_MPI2_FUNCTION_LD_IO_REQUEST ||
398 	    io_req->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
399 		lmrc_process_mpt_pkt(lmrc, mpt->mpt_pkt,
400 		    io_req->VendorRegion.rc_status,
401 		    io_req->VendorRegion.rc_exstatus, io_req->DataLength);
402 	}
403 
404 	return (TRAN_ACCEPT);
405 }
406 
407 /*
408  * lmrc_process_replies
409  *
410  * Process all new reply entries in a queue in response to an interrupt.
411  */
412 int
413 lmrc_process_replies(lmrc_t *lmrc, uint8_t queue)
414 {
415 	int nprocessed = 0;
416 	Mpi2ReplyDescriptorsUnion_t *desc;
417 
418 	for (desc = lmrc_get_next_reply_desc(lmrc, queue);
419 	    desc != NULL;
420 	    desc = lmrc_get_next_reply_desc(lmrc, queue)) {
421 		Mpi2SCSIIOSuccessReplyDescriptor_t *reply =
422 		    &desc->SCSIIOSuccess;
423 		uint16_t smid = reply->SMID;
424 		lmrc_mpt_cmd_t *mpt = lmrc->l_mpt_cmds[smid - 1];
425 		lmrc_tgt_t *tgt = NULL;
426 		Mpi25SCSIIORequest_t *io_req;
427 		struct scsi_pkt *pkt;
428 		struct scsi_device *sd;
429 
430 		VERIFY3U(reply->SMID, <=, lmrc->l_max_fw_cmds);
431 
432 		mutex_enter(&mpt->mpt_lock);
433 		mpt->mpt_complete = B_TRUE;
434 		pkt = mpt->mpt_pkt;
435 		io_req = mpt->mpt_io_frame;
436 
437 		VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
438 		    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
439 		    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE,
440 		    DDI_DMA_SYNC_FORKERNEL), ==, DDI_SUCCESS);
441 
442 
443 		switch (io_req->Function) {
444 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
445 			VERIFY0(pkt);
446 			VERIFY0(list_link_active(&mpt->mpt_node));
447 			cv_signal(&mpt->mpt_cv);
448 			break;
449 
450 		case MPI2_FUNCTION_SCSI_IO_REQUEST:
451 		case LMRC_MPI2_FUNCTION_LD_IO_REQUEST:
452 			VERIFY(pkt != NULL);
453 
454 			sd = scsi_address_device(&pkt->pkt_address);
455 			VERIFY(sd != NULL);
456 
457 			tgt = scsi_device_hba_private_get(sd);
458 			VERIFY(tgt != NULL);
459 
460 			lmrc_process_mpt_pkt(lmrc, pkt,
461 			    io_req->VendorRegion.rc_status,
462 			    io_req->VendorRegion.rc_exstatus,
463 			    io_req->DataLength);
464 
465 			break;
466 
467 		case LMRC_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
468 			VERIFY0(pkt);
469 			VERIFY0(list_link_active(&mpt->mpt_node));
470 			lmrc_process_mptmfi_passthru(lmrc, mpt);
471 			break;
472 
473 		default:
474 			mutex_exit(&mpt->mpt_lock);
475 			dev_err(lmrc->l_dip, CE_PANIC,
476 			    "!reply received for unknown Function %x",
477 			    io_req->Function);
478 			break;
479 		}
480 
481 		mutex_exit(&mpt->mpt_lock);
482 
483 		if (pkt != NULL) {
484 			lmrc_tgt_rem_active_mpt(tgt, mpt);
485 			atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
486 			scsi_hba_pkt_comp(pkt);
487 		}
488 
489 		/* reset descriptor */
490 		desc->Words.Low = MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK;
491 		desc->Words.High = MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK;
492 
493 		nprocessed++;
494 
495 		if (nprocessed % LMRC_THRESHOLD_REPLY_COUNT == 0)
496 			lmrc_write_rphi(lmrc, queue);
497 	}
498 
499 	if (nprocessed != 0 && nprocessed % LMRC_THRESHOLD_REPLY_COUNT != 0)
500 		lmrc_write_rphi(lmrc, queue);
501 
502 	return (DDI_INTR_CLAIMED);
503 }
504 
505 
506 /*
507  * MFI - MegaRAID Firmware Interface
508  */
509 
510 /*
511  * lmrc_build_mptmfi_passthru
512  *
513  * MFI commands are sent as MPT MFI passthrough I/O requests. To send a MFI
514  * frame to the RAID controller, we need to get a MPT command, set up the MPT
515  * I/O request and build a one-entry SGL pointing to the MFI command.
516  */
517 static void
518 lmrc_build_mptmfi_passthru(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
519 {
520 	Mpi25SCSIIORequest_t *io_req;
521 	const ddi_dma_cookie_t *cookie;
522 	lmrc_mpt_cmd_t *mpt;
523 
524 	mpt = lmrc_get_mpt(lmrc);
525 	ASSERT(mutex_owned(&mpt->mpt_lock));
526 
527 	mfi->mfi_mpt = mpt;
528 	mpt->mpt_mfi = mfi;
529 
530 	io_req = mpt->mpt_io_frame;
531 	io_req->Function = LMRC_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
532 	io_req->ChainOffset = lmrc->l_chain_offset_mfi_pthru;
533 
534 	cookie = ddi_dma_cookie_one(mfi->mfi_frame_dma.ld_hdl);
535 	lmrc_dma_build_sgl(lmrc, mpt, cookie, 1);
536 
537 	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
538 	    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
539 	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
540 	    ==, DDI_SUCCESS);
541 }
542 
543 /*
544  * lmrc_process_mptmfi_passthru
545  *
546  * When a MPT MFI passthrough command completes, invoke the callback if there
547  * is one. Panic if an invalid command completed as that should never happen.
548  */
549 static void
550 lmrc_process_mptmfi_passthru(lmrc_t *lmrc, lmrc_mpt_cmd_t *mpt)
551 {
552 	lmrc_mfi_cmd_t *mfi;
553 	lmrc_mfi_header_t *hdr;
554 
555 	VERIFY3P(mpt->mpt_mfi, !=, NULL);
556 	mfi = mpt->mpt_mfi;
557 	VERIFY0(list_link_active(&mfi->mfi_node));
558 
559 	hdr = &mfi->mfi_frame->mf_hdr;
560 
561 	if ((hdr->mh_flags & MFI_FRAME_DIR_READ) != 0)
562 		(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
563 		    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORKERNEL);
564 
565 	switch (hdr->mh_cmd) {
566 	case MFI_CMD_DCMD:
567 	case MFI_CMD_LD_SCSI_IO:
568 	case MFI_CMD_PD_SCSI_IO:
569 	case MFI_CMD_ABORT:
570 		mutex_enter(&mfi->mfi_lock);
571 		if (mfi->mfi_callback != NULL)
572 			mfi->mfi_callback(lmrc, mfi);
573 		mutex_exit(&mfi->mfi_lock);
574 		break;
575 
576 	case MFI_CMD_INVALID:
577 	default:
578 		dev_err(lmrc->l_dip, CE_PANIC,
579 		    "!invalid MFI cmd completion received, cmd = %x",
580 		    hdr->mh_cmd);
581 		break;
582 	}
583 }
584 
585 /*
586  * lmrc_issue_mfi
587  *
588  * Post a MFI command to the firmware. Reset the cmd_status to invalid. Build
589  * a MPT MFI passthru command if necessary and a MPT atomic request descriptor
590  * before posting the request. The MFI command's mutex must be held. If the MPT
591  * MFI passthru command already exists for the MFI command, the MPT command's
592  * mutex must be held, too, and we don't drop it on return.
593  */
594 void
595 lmrc_issue_mfi(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi, lmrc_mfi_cmd_cb_t *cb)
596 {
597 	boolean_t exit_mutex = B_FALSE;
598 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
599 	lmrc_atomic_req_desc_t req_desc;
600 
601 	ASSERT(mutex_owned(&mfi->mfi_lock));
602 
603 	if ((hdr->mh_flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) == 0) {
604 		VERIFY3U(cb, !=, NULL);
605 		mfi->mfi_callback = cb;
606 	} else {
607 		VERIFY3U(cb, ==, NULL);
608 	}
609 
610 	hdr->mh_cmd_status = MFI_STAT_INVALID_STATUS;
611 	if (mfi->mfi_mpt == NULL) {
612 		exit_mutex = B_TRUE;
613 		lmrc_build_mptmfi_passthru(lmrc, mfi);
614 	}
615 
616 	ASSERT(mutex_owned(&mfi->mfi_mpt->mpt_lock));
617 
618 	req_desc = lmrc_build_atomic_request(lmrc, mfi->mfi_mpt,
619 	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO);
620 
621 	(void) ddi_dma_sync(mfi->mfi_frame_dma.ld_hdl, 0,
622 	    mfi->mfi_frame_dma.ld_len, DDI_DMA_SYNC_FORDEV);
623 
624 	if ((hdr->mh_flags & MFI_FRAME_DIR_WRITE) != 0)
625 		(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
626 		    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORDEV);
627 
628 	lmrc_send_atomic_request(lmrc, req_desc);
629 	if (exit_mutex)
630 		mutex_exit(&mfi->mfi_mpt->mpt_lock);
631 }
632 
633 /*
634  * lmrc_poll_mfi
635  *
636  * Poll a MFI command for completion, waiting up to max_wait secs. Repeatedly
637  * check the command status until it changes to something that is not invalid.
638  *
639  * Trigger an online controller reset on timeout.
640  */
641 static int
642 lmrc_poll_mfi(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi, uint8_t max_wait)
643 {
644 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
645 	lmrc_dma_t *dma = &mfi->mfi_frame_dma;
646 	clock_t timeout = ddi_get_lbolt() + drv_usectohz(max_wait * MICROSEC);
647 	clock_t now;
648 
649 	ASSERT(mutex_owned(&mfi->mfi_lock));
650 
651 	do {
652 		(void) ddi_dma_sync(dma->ld_hdl, 0, dma->ld_len,
653 		    DDI_DMA_SYNC_FORKERNEL);
654 		if (hdr->mh_cmd_status != MFI_STAT_INVALID_STATUS)
655 			break;
656 
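		/*
		 * Sleep for about a millisecond before re-checking the
		 * status; a cv_signal() on mfi_cv ends the wait early.
		 */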
657 		(void) cv_reltimedwait(&mfi->mfi_cv, &mfi->mfi_lock,
658 		    drv_usectohz(MILLISEC), TR_MILLISEC);
659 		now = ddi_get_lbolt();
660 	} while (!lmrc->l_fw_fault && now <= timeout);
661 
662 	if (hdr->mh_cmd_status != MFI_STAT_INVALID_STATUS)
663 		return (DDI_SUCCESS);
664 
665 	if (now > timeout) {
666 		dev_err(lmrc->l_dip, CE_WARN,
667 		    "!%s: command timeout after %ds", __func__, max_wait);
668 
669 		/*
670 		 * Signal the housekeeping thread to check for FW/HW faults,
671 		 * performing a reset if necessary.
672 		 */
673 		cv_signal(&lmrc->l_thread_cv);
674 	}
675 
676 	return (DDI_FAILURE);
677 }
678 
679 /*
680  * lmrc_wait_mfi
681  *
682  * Wait for up to max_wait secs for a MFI command to complete. The cmd mutex
683  * must be held.
684  *
685  * Trigger an online controller reset on timeout.
686  */
687 int
688 lmrc_wait_mfi(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi, uint8_t max_wait)
689 {
690 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
691 	lmrc_dma_t *dma = &mfi->mfi_frame_dma;
692 	clock_t timeout = ddi_get_lbolt() + drv_usectohz(max_wait * MICROSEC);
693 	int ret;
694 
695 	ASSERT(mutex_owned(&mfi->mfi_lock));
696 
697 	do {
698 		ret = cv_timedwait(&mfi->mfi_cv, &mfi->mfi_lock, timeout);
699 
700 		(void) ddi_dma_sync(dma->ld_hdl, 0, dma->ld_len,
701 		    DDI_DMA_SYNC_FORKERNEL);
702 
703 	} while (!lmrc->l_fw_fault &&
704 	    hdr->mh_cmd_status == MFI_STAT_INVALID_STATUS && ret != -1);
705 
706 	if (!lmrc->l_fw_fault && ret != -1)
707 		return (DDI_SUCCESS);
708 
709 	if (ret == -1) {
710 		dev_err(lmrc->l_dip, CE_WARN, "!%s: blocked command timeout "
711 		    "after %ds, cmd = %d, status = %d", __func__, max_wait,
712 		    hdr->mh_cmd, hdr->mh_cmd_status);
713 
714 		/*
715 		 * Signal the housekeeping thread to check for FW/HW faults,
716 		 * performing a reset if necessary.
717 		 */
718 		cv_signal(&lmrc->l_thread_cv);
719 	}
720 
721 	return (DDI_FAILURE);
722 }
723 
724 /*
725  * lmrc_wakeup_mfi
726  *
727  * Signal the CV associated with a MFI command to wake up the thread waiting
728  * for its completion.
729  */
730 void
731 lmrc_wakeup_mfi(lmrc_t *lmrc, lmrc_mfi_cmd_t *cmd)
732 {
733 	ASSERT(mutex_owned(&cmd->mfi_lock));
734 	cv_signal(&cmd->mfi_cv);
735 }
736 
737 /*
738  * lmrc_issue_blocked_mfi
739  *
740  * Post a MFI command to the firmware and wait for the command to complete.
741  */
742 int
743 lmrc_issue_blocked_mfi(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
744 {
745 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
746 	int ret;
747 
748 	mutex_enter(&mfi->mfi_lock);
749 	lmrc_issue_mfi(lmrc, mfi, lmrc_wakeup_mfi);
750 	ret = lmrc_wait_mfi(lmrc, mfi, LMRC_INTERNAL_CMD_WAIT_TIME);
751 	mutex_exit(&mfi->mfi_lock);
752 
753 	if (ret == DDI_SUCCESS && hdr->mh_cmd_status == MFI_STAT_OK)
754 		return (DDI_SUCCESS);
755 
756 	dev_err(lmrc->l_dip, CE_WARN,
757 	    "!%s: blocked command failure, cmd = %d, status = %d",
758 	    __func__, hdr->mh_cmd, hdr->mh_cmd_status);
759 
760 	return (ret);
761 }
762 
763 /*
764  * lmrc_abort_cb
765  *
766  * Callback for any command that is to be aborted.
767  *
768  * If the command completed normally before it could be aborted, set the status
769  * to indicate the intended abort.
770  */
771 static void
772 lmrc_abort_cb(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
773 {
774 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
775 
776 	if (hdr->mh_cmd_status == MFI_STAT_OK)
777 		hdr->mh_cmd_status = MFI_STAT_NOT_FOUND;
778 }
779 
780 /*
781  * lmrc_abort_cmd
782  *
783  * Abort a MFI command. This is a bit tricky as the hardware may still complete
784  * it at any time.
785  *
786  * The mutex of the command to be aborted must be held to prevent it from
787  * completing behind our back. We'll replace its callback with our own, issue an
788  * ABORT command, and drop the mutex before we wait for the ABORT command to
789  * complete.
790  */
791 static int
792 lmrc_abort_cmd(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi_to_abort)
793 {
794 	lmrc_mfi_cmd_t *mfi = lmrc_get_mfi(lmrc);
795 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
796 	lmrc_mfi_abort_payload_t *abort = &mfi->mfi_frame->mf_abort;
797 	lmrc_mfi_cmd_cb_t *orig_cb = mfi_to_abort->mfi_callback;
798 	int ret;
799 
800 	ASSERT(mutex_owned(&mfi_to_abort->mfi_lock));
801 
802 	/* Replace the command's callback with our own. */
803 	mfi_to_abort->mfi_callback = lmrc_abort_cb;
804 
805 	hdr->mh_cmd = MFI_CMD_ABORT;
806 	abort->ma_abort_context = mfi_to_abort->mfi_idx;
807 	lmrc_dma_set_addr64(&mfi_to_abort->mfi_frame_dma,
808 	    &abort->ma_abort_mfi_phys_addr);
809 
810 	/* Send the ABORT. */
811 	mutex_enter(&mfi->mfi_lock);
812 	lmrc_issue_mfi(lmrc, mfi, lmrc_wakeup_mfi);
813 
814 	/*
815 	 * Drop the mutex of the command to be aborted, allowing it to proceed
816 	 * while we wait for the ABORT command to complete.
817 	 */
818 	mutex_exit(&mfi_to_abort->mfi_lock);
819 	ret = lmrc_wait_mfi(lmrc, mfi, LMRC_INTERNAL_CMD_WAIT_TIME);
820 	mutex_exit(&mfi->mfi_lock);
821 
822 	/*
823 	 * The ABORT command may fail if mfi_to_abort has completed already.
824 	 * Treat any other failure as fatal, restore the callback and fail.
825 	 */
826 	if (ret != DDI_SUCCESS && hdr->mh_cmd_status != MFI_STAT_NOT_FOUND) {
827 		mutex_enter(&mfi_to_abort->mfi_lock);
828 		mfi_to_abort->mfi_callback = orig_cb;
829 		goto out;
830 	}
831 
832 	/*
833 	 * Wait for the aborted command to complete. If we time out on this
834 	 * there's little we can do here, so we restore the callback and fail.
835 	 */
836 	mutex_enter(&mfi_to_abort->mfi_lock);
837 	ret = lmrc_poll_mfi(lmrc, mfi_to_abort, LMRC_INTERNAL_CMD_WAIT_TIME);
838 	mfi_to_abort->mfi_callback = orig_cb;
839 
840 	if (ret != DDI_SUCCESS)
841 		goto out;
842 
843 	/* Wake up anyone waiting on the aborted command. */
844 	if (mfi_to_abort->mfi_callback != NULL)
845 		mfi_to_abort->mfi_callback(lmrc, mfi_to_abort);
846 
847 out:
848 	lmrc_put_mfi(mfi);
849 	ASSERT(mutex_owned(&mfi_to_abort->mfi_lock));
850 	return (ret);
851 }
852 
853 
854 /*
855  * Controller Initialization and Housekeeping
856  */
857 
858 /*
859  * lmrc_check_fw_fault
860  *
861  * Check the firmware state. If faulted, return B_TRUE.
862  * Return B_FALSE otherwise.
863  */
864 static boolean_t
865 lmrc_check_fw_fault(lmrc_t *lmrc)
866 {
867 	uint32_t status = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
868 	uint32_t fw_state = LMRC_FW_STATE(status);
869 
870 	if (fw_state == LMRC_FW_STATE_FAULT)
871 		return (B_TRUE);
872 
873 	return (B_FALSE);
874 }
875 
876 /*
877  * lmrc_wait_for_reg
878  *
879  * Repeatedly read the register and check that 'bits' match 'exp'.
880  */
881 static boolean_t
882 lmrc_wait_for_reg(lmrc_t *lmrc, uint32_t reg, uint32_t bits, uint32_t exp,
883     uint8_t max_wait)
884 {
885 	uint32_t val;
886 	int i;
887 
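	/*
	 * Convert max_wait from seconds into the number of 100ms polling
	 * intervals used below.
	 */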
888 	max_wait *= MILLISEC / 100;
889 
890 	for (i = 0; i < max_wait; i++) {
891 		delay(drv_usectohz(100 * MILLISEC));
892 		val = lmrc_read_reg(lmrc, reg);
893 
894 		if ((val & bits) == exp)
895 			return (B_TRUE);
896 	}
897 
898 	return (B_FALSE);
899 }
900 
901 /*
902  * lmrc_reset_ctrl
903  *
904  * Attempt to reset the controller, if the hardware supports it.
905  * If reset is unsupported or the reset fails repeatedly, we shut the
906  * controller down.
907  */
908 static int
909 lmrc_reset_ctrl(lmrc_t *lmrc)
910 {
911 	uint32_t status, fw_state, reset_adapter;
912 	int max_wait, i;
913 
914 	if (lmrc->l_disable_online_ctrl_reset)
915 		return (DDI_FAILURE);
916 
917 	status = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
918 	fw_state = LMRC_FW_STATE(status);
919 	reset_adapter = LMRC_FW_RESET_ADAPTER(status);
920 
921 	if (fw_state == LMRC_FW_STATE_FAULT && reset_adapter == 0) {
922 		dev_err(lmrc->l_dip, CE_WARN,
923 		    "FW in fault state, but reset not supported");
924 		goto out;
925 	}
926 
927 	for (i = 0; i < LMRC_MAX_RESET_TRIES; i++) {
928 		dev_err(lmrc->l_dip, CE_WARN, "resetting...");
929 
930 		/* Write the reset key sequence. */
931 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
932 		    MPI2_WRSEQ_FLUSH_KEY_VALUE);
933 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
934 		    MPI2_WRSEQ_1ST_KEY_VALUE);
935 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
936 		    MPI2_WRSEQ_2ND_KEY_VALUE);
937 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
938 		    MPI2_WRSEQ_3RD_KEY_VALUE);
939 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
940 		    MPI2_WRSEQ_4TH_KEY_VALUE);
941 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
942 		    MPI2_WRSEQ_5TH_KEY_VALUE);
943 		lmrc_write_reg(lmrc, LMRC_WRITE_SEQUENCE,
944 		    MPI2_WRSEQ_6TH_KEY_VALUE);
945 
946 		/* Check diag write enable. */
947 		if (!lmrc_wait_for_reg(lmrc, LMRC_HOST_DIAG,
948 		    MPI2_DIAG_DIAG_WRITE_ENABLE, MPI2_DIAG_DIAG_WRITE_ENABLE,
949 		    LMRC_RESET_TIMEOUT)) {
950 			dev_err(lmrc->l_dip, CE_WARN, "diag unlock failed");
951 			continue;
952 		}
953 
954 		/* Reset chip. */
955 		lmrc_write_reg(lmrc, LMRC_HOST_DIAG, lmrc_read_reg(lmrc,
956 		    LMRC_HOST_DIAG) | MPI2_DIAG_RESET_ADAPTER);
957 		delay(drv_usectohz(3 * MICROSEC));
958 
959 		/* Check the reset adapter bit. */
960 		if (!lmrc_wait_for_reg(lmrc, LMRC_HOST_DIAG,
961 		    MPI2_DIAG_RESET_ADAPTER, 0, LMRC_RESET_TIMEOUT)) {
962 			dev_err(lmrc->l_dip, CE_WARN, "diag reset not cleared");
963 			continue;
964 		}
965 
966 		/* Wait for the FW state to move beyond INIT. */
967 		max_wait = LMRC_IO_TIMEOUT * MILLISEC / 100;
968 		do {
969 			status = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
970 			fw_state = LMRC_FW_STATE(status);
971 
972 			if (fw_state <= LMRC_FW_STATE_FW_INIT)
973 				delay(drv_usectohz(100 * MILLISEC));
974 		} while (fw_state <= LMRC_FW_STATE_FW_INIT && max_wait > 0);
975 
976 		if (fw_state <= LMRC_FW_STATE_FW_INIT) {
977 			dev_err(lmrc->l_dip, CE_WARN,
978 			    "fw state <= LMRC_FW_STATE_FW_INIT, state = %x",
979 			    fw_state);
980 			continue;
981 		}
982 
983 		return (DDI_SUCCESS);
984 	}
985 
986 	dev_err(lmrc->l_dip, CE_WARN, "reset failed");
987 out:
988 	/* Stop the controller. */
989 	lmrc_write_reg(lmrc, LMRC_DOORBELL, MFI_STOP_ADP);
990 	(void) lmrc_read_reg(lmrc, LMRC_DOORBELL);
991 
992 	return (DDI_FAILURE);
993 }
994 
995 /*
996  * lmrc_tgt_complete_cmd
997  *
998  * In case of a controller reset, complete the cmd and clean up. This is done
999  * in a taskq to avoid locking and list manipulation headaches.
1000  */
1001 static void
1002 lmrc_tgt_complete_cmd(void *arg)
1003 {
1004 	lmrc_scsa_cmd_t *cmd = arg;
1005 	struct scsi_pkt *pkt;
1006 	lmrc_t *lmrc;
1007 
1008 	mutex_enter(&cmd->sc_mpt->mpt_lock);
1009 
1010 	/* Just in case the command completed before the taskq was run... */
1011 	if (cmd->sc_mpt->mpt_complete) {
1012 		mutex_exit(&cmd->sc_mpt->mpt_lock);
1013 		return;
1014 	}
1015 
1016 	lmrc = cmd->sc_mpt->mpt_lmrc;
1017 	pkt = cmd->sc_mpt->mpt_pkt;
1018 
1019 	pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
1020 	pkt->pkt_reason = CMD_RESET;
1021 	pkt->pkt_statistics = STAT_BUS_RESET;
1022 	mutex_exit(&cmd->sc_mpt->mpt_lock);
1023 
1024 	lmrc_tgt_rem_active_mpt(cmd->sc_tgt, cmd->sc_mpt);
1025 	atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
1026 
1027 	scsi_hba_pkt_comp(pkt);
1028 }
1029 
1030 /*
1031  * lmrc_tgt_complete_cmds
1032  *
1033  * Walk the list of active commands of a target. Schedule a taskq entry for
1034  * each of them to complete the command and clean up.
1035  */
1036 static void
1037 lmrc_tgt_complete_cmds(lmrc_t *lmrc, lmrc_tgt_t *tgt)
1038 {
1039 	lmrc_mpt_cmd_t *mpt;
1040 
1041 	mutex_enter(&tgt->tgt_mpt_active_lock);
1042 	if (list_is_empty(&tgt->tgt_mpt_active)) {
1043 		mutex_exit(&tgt->tgt_mpt_active_lock);
1044 		return;
1045 	}
1046 
1047 	for (mpt = lmrc_tgt_first_active_mpt(tgt);
1048 	    mpt != NULL;
1049 	    mpt = lmrc_tgt_next_active_mpt(tgt, mpt)) {
1050 		lmrc_scsa_cmd_t *cmd = mpt->mpt_pkt->pkt_ha_private;
1051 
1052 		ASSERT(mutex_owned(&mpt->mpt_lock));
1053 		VERIFY(mpt->mpt_pkt != NULL);
1054 		VERIFY(cmd != NULL);
1055 
1056 		if (mpt->mpt_complete)
1057 			continue;
1058 
1059 		taskq_dispatch_ent(lmrc->l_taskq, lmrc_tgt_complete_cmd, cmd,
1060 		    TQ_NOSLEEP, &mpt->mpt_tqent);
1061 	}
1062 	mutex_exit(&tgt->tgt_mpt_active_lock);
1063 }
1064 
1065 /*
1066  * lmrc_tgt_timeout_cmds
1067  *
1068  * Walk the list of active commands of a target. Try to abort commands which are
1069  * overdue.
1070  */
1071 static int
1072 lmrc_tgt_timeout_cmds(lmrc_t *lmrc, lmrc_tgt_t *tgt)
1073 {
1074 	lmrc_mpt_cmd_t *mpt;
1075 	int ret = DDI_SUCCESS;
1076 
1077 	mutex_enter(&tgt->tgt_mpt_active_lock);
1078 	if (list_is_empty(&tgt->tgt_mpt_active))
1079 		goto out;
1080 
1081 	for (mpt = lmrc_tgt_first_active_mpt(tgt);
1082 	    mpt != NULL;
1083 	    mpt = lmrc_tgt_next_active_mpt(tgt, mpt)) {
1084 		hrtime_t now;
1085 
1086 		ASSERT(mutex_owned(&mpt->mpt_lock));
1087 		VERIFY(mpt->mpt_pkt != NULL);
1088 
1089 		/* Just in case the command completed by now... */
1090 		if (mpt->mpt_complete)
1091 			continue;
1092 
1093 		now = gethrtime();
1094 
1095 		if (now > mpt->mpt_timeout) {
1096 			/*
1097 			 * Give the packet a bit more time for the abort to
1098 			 * complete.
1099 			 */
1100 			mpt->mpt_timeout = now + LMRC_IO_TIMEOUT * NANOSEC;
1101 
1102 			/*
1103 			 * If the abort failed for whatever reason,
1104 			 * we can stop here as only a controller reset
1105 			 * can get us back into a sane state.
1106 			 */
1107 			if (lmrc_abort_mpt(lmrc, tgt, mpt) != 1) {
1108 				mutex_exit(&mpt->mpt_lock);
1109 				ret = DDI_FAILURE;
1110 				goto out;
1111 			}
1112 		}
1113 	}
1114 
1115 out:
1116 	mutex_exit(&tgt->tgt_mpt_active_lock);
1117 	return (ret);
1118 }
1119 
1120 /*
1121  * lmrc_thread
1122  *
1123  * Check whether the controller is in FW fault state. Check all targets for
1124  * commands which have timed out.
1125  */
1126 void
1127 lmrc_thread(void *arg)
1128 {
1129 	lmrc_t *lmrc = arg;
1130 
1131 	do {
1132 		int i;
1133 
1134 		/* Wake up at least once a minute. */
1135 		mutex_enter(&lmrc->l_thread_lock);
1136 		(void) cv_reltimedwait(&lmrc->l_thread_cv, &lmrc->l_thread_lock,
1137 		    drv_usectohz(60 * MICROSEC), TR_SEC);
1138 		mutex_exit(&lmrc->l_thread_lock);
1139 
1140 		if (lmrc->l_thread_stop)
1141 			continue;
1142 
1143 		lmrc->l_fw_fault = lmrc_check_fw_fault(lmrc);
1144 
1145 		/*
1146 		 * Check all targets for timed-out commands. If we find any
1147 		 * and fail to abort them, we pretend the FW has faulted to
1148 		 * trigger a reset.
1149 		 */
1150 		if (!lmrc->l_fw_fault) {
1151 			for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++) {
1152 				if (lmrc_tgt_timeout_cmds(lmrc,
1153 				    &lmrc->l_targets[i]) != DDI_SUCCESS) {
1154 					lmrc->l_fw_fault = B_TRUE;
1155 					break;
1156 				}
1157 			}
1158 		}
1159 
1160 		/*
1161 		 * If the FW is faulted, try to recover by performing a reset.
1162 		 */
1163 		if (lmrc->l_fw_fault) {
1164 			int ret;
1165 
1166 			lmrc_disable_intr(lmrc);
1167 
1168 			/*
1169 			 * Even if the reset failed, it will have stopped the
1170 			 * controller and we can complete all outstanding
1171 			 * commands.
1172 			 */
1173 			ret = lmrc_reset_ctrl(lmrc);
1174 
1175 			(void) lmrc_abort_outstanding_mfi(lmrc,
1176 			    LMRC_MAX_MFI_CMDS);
1177 
1178 			for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++)
1179 				lmrc_tgt_complete_cmds(lmrc,
1180 				    &lmrc->l_targets[i]);
1181 
1182 			if (ret != DDI_SUCCESS) {
1183 				dev_err(lmrc->l_dip, CE_WARN, "reset failed");
1184 				continue;
1185 			}
1186 
1187 			if (lmrc_transition_to_ready(lmrc) != DDI_SUCCESS)
1188 				continue;
1189 
1190 			if (lmrc_ioc_init(lmrc) != DDI_SUCCESS)
1191 				continue;
1192 
1193 			lmrc_enable_intr(lmrc);
1194 
1195 			if (lmrc_start_aen(lmrc) != DDI_SUCCESS) {
1196 				dev_err(lmrc->l_dip, CE_WARN,
1197 				    "failed to re-initiate AEN");
1198 				continue;
1199 			}
1200 
1201 			lmrc->l_fw_fault = lmrc_check_fw_fault(lmrc);
1202 		}
1203 	} while (!lmrc->l_thread_stop);
1204 
1205 	thread_exit();
1206 }
1207 
1208 /*
1209  * lmrc_transition_to_ready
1210  *
1211  * Move firmware to ready state. At attach time, the FW can potentially be in
1212  * any one of several possible states. If the FW is in the operational or
1213  * waiting-for-handshake state, take steps to bring it to ready state.
1214  * Otherwise, wait for the FW to reach ready state.
1215  */
1216 static int
1217 lmrc_transition_to_ready(lmrc_t *lmrc)
1218 {
1219 	uint32_t status, new_status;
1220 	uint32_t fw_state;
1221 	uint8_t max_wait;
1222 	uint_t i;
1223 
1224 	status = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
1225 	fw_state = LMRC_FW_STATE(status);
1226 	max_wait = LMRC_RESET_TIMEOUT;
1227 
1228 	while (fw_state != LMRC_FW_STATE_READY) {
1229 		switch (fw_state) {
1230 		case LMRC_FW_STATE_FAULT:
1231 			dev_err(lmrc->l_dip, CE_NOTE, "FW is in fault state!");
1232 			if (lmrc_reset_ctrl(lmrc) != DDI_SUCCESS)
1233 				return (DDI_FAILURE);
1234 			break;
1235 
1236 		case LMRC_FW_STATE_WAIT_HANDSHAKE:
1237 			/* Set the CLR bit in inbound doorbell */
1238 			lmrc_write_reg(lmrc, LMRC_DOORBELL,
1239 			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
1240 			break;
1241 
1242 		case LMRC_FW_STATE_BOOT_MSG_PENDING:
1243 			lmrc_write_reg(lmrc, LMRC_DOORBELL,
1244 			    MFI_INIT_HOTPLUG);
1245 			break;
1246 
1247 		case LMRC_FW_STATE_OPERATIONAL:
1248 			/* Bring it to READY state, wait up to 10s */
1249 			lmrc_disable_intr(lmrc);
1250 			lmrc_write_reg(lmrc, LMRC_DOORBELL, MFI_RESET_FLAGS);
1251 			(void) lmrc_wait_for_reg(lmrc, LMRC_DOORBELL, 1, 0, 10);
1252 
1253 			break;
1254 
1255 		case LMRC_FW_STATE_UNDEFINED:
1256 			/* This state should not last for more than 2 sec */
1257 		case LMRC_FW_STATE_BB_INIT:
1258 		case LMRC_FW_STATE_FW_INIT:
1259 		case LMRC_FW_STATE_FW_INIT_2:
1260 		case LMRC_FW_STATE_DEVICE_SCAN:
1261 		case LMRC_FW_STATE_FLUSH_CACHE:
1262 			break;
1263 		default:
1264 			dev_err(lmrc->l_dip, CE_WARN, "Unknown FW state %x",
1265 			    fw_state);
1266 			return (DDI_FAILURE);
1267 		}
1268 
1269 		/*
1270 		 * The current state should not last for more than max_wait
1271 		 * seconds.
1272 		 */
1273 		for (i = 0; i < max_wait * 1000; i++) {
1274 			new_status = lmrc_read_reg(lmrc,
1275 			    LMRC_OB_SCRATCH_PAD(0));
1276 
1277 			if (status != new_status)
1278 				break;
1279 
1280 			delay(drv_usectohz(MILLISEC));
1281 		}
1282 
1283 		if (new_status == status) {
1284 			dev_err(lmrc->l_dip, CE_WARN,
1285 			    "FW state (%x) hasn't changed in %d seconds",
1286 			    fw_state, max_wait);
1287 			return (DDI_FAILURE);
1288 		}
1289 
1290 		status = new_status;
1291 		fw_state = LMRC_FW_STATE(status);
1292 	}
1293 
1294 	if (lmrc_check_acc_handle(lmrc->l_reghandle) != DDI_FM_OK)
1295 		return (DDI_FAILURE);
1296 
1297 	return (DDI_SUCCESS);
1298 }
1299 
1300 /*
1301  * lmrc_adapter_init
1302  *
1303  * Get the hardware and firmware into a usable state, and fetch some basic
1304  * information from the registers to calculate sizes of basic data structures.
1305  */
1306 int
1307 lmrc_adapter_init(lmrc_t *lmrc)
1308 {
1309 	uint32_t reg;
1310 	int ret;
1311 	int i;
1312 
1313 	ret = lmrc_transition_to_ready(lmrc);
1314 	if (ret != DDI_SUCCESS)
1315 		return (ret);
1316 
1317 	/*
1318 	 * Get maximum RAID map size.
1319 	 */
1320 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(2));
1321 	lmrc->l_max_raid_map_sz = LMRC_MAX_RAID_MAP_SZ(reg);
1322 
1323 	lmrc->l_max_reply_queues = 1;
1324 	lmrc->l_rphi[0] = LMRC_REPLY_POST_HOST_INDEX;
1325 
1326 	/*
1327 	 * Apparently, bit 27 of the scratch pad register indicates whether
1328 	 * MSI-X is supported by the firmware.
1329 	 */
1330 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
1331 
1332 	if (LMRC_FW_MSIX_ENABLED(reg)) {
1333 		lmrc->l_fw_msix_enabled = B_TRUE;
1334 
1335 		reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(1));
1336 		lmrc->l_max_reply_queues = LMRC_MAX_REPLY_QUEUES_EXT(reg);
1337 
1338 		if (lmrc->l_max_reply_queues > LMRC_MAX_REPLY_POST_HOST_INDEX) {
1339 			lmrc->l_msix_combined = B_TRUE;
1340 			lmrc->l_rphi[0] =
1341 			    LMRC_SUP_REPLY_POST_HOST_INDEX;
1342 		}
1343 
1344 		/*
1345 		 * Compute reply post index register addresses 1-15.
1346 		 */
1347 		for (i = 1; i < LMRC_MAX_REPLY_POST_HOST_INDEX; i++) {
1348 			lmrc->l_rphi[i] = i * 0x10 +
1349 			    LMRC_SUP_REPLY_POST_HOST_INDEX;
1350 		}
1351 	}
1352 
1353 	/*
1354 	 * Get the number of commands the firmware supports. Use one less,
1355 	 * because reply_q_depth is based on one more than this. XXX: Why?
1356 	 */
1357 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(0));
1358 	lmrc->l_max_fw_cmds = LMRC_FW_MAX_CMD(reg) - 1;
1359 
1360 	if (lmrc->l_max_fw_cmds < LMRC_MAX_MFI_CMDS) {
1361 		dev_err(lmrc->l_dip, CE_WARN, "!max_fw_cmds too low: %d",
1362 		    lmrc->l_max_fw_cmds);
1363 		return (DDI_FAILURE);
1364 	}
1365 
1366 	/*
1367 	 * Reserve some commands for MFI, the remainder is for SCSI commands.
1368 	 */
1369 	lmrc->l_max_scsi_cmds = lmrc->l_max_fw_cmds - LMRC_MAX_MFI_CMDS;
1370 
1371 	/*
1372 	 * XXX: This magic calculation isn't explained anywhere. Let's see...
1373 	 * lmrc_max_fw_cmds + 1 gives us what was reported in the register.
1374 	 * That + 15 is for rounding it up to the next multiple of 16, which
1375 	 * / 16 * 16 does.
1376 	 * And apparently we want twice that much for queue depth. Why?
1377 	 *
1378 	 * So in reality, the queue depth is based on at least one more than
1379 	 * lmrc_max_fw_cmds, but it could be even more. That makes the above
1380 	 * statement about lmrc_max_fw_cmds questionable.
1381 	 */
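	/*
	 * Worked example (the firmware-reported command count is assumed
	 * purely for illustration): if the register reported 1000 commands,
	 * l_max_fw_cmds is 999 and the depth becomes
	 * (999 + 1 + 15) / 16 * 16 * 2 = 1008 * 2 = 2016 entries.
	 */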
1382 	lmrc->l_reply_q_depth = (lmrc->l_max_fw_cmds + 1 + 15) / 16 * 16 * 2;
1383 
1384 	/* Allocation size of one reply queue, based on depth. */
1385 	lmrc->l_reply_alloc_sz =
1386 	    sizeof (Mpi2ReplyDescriptorsUnion_t) * lmrc->l_reply_q_depth;
1387 
1388 	/* Allocation size of the DMA memory used for all MPI I/O frames. */
1389 	lmrc->l_io_frames_alloc_sz = LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1390 	    (lmrc->l_max_fw_cmds + 2);
1391 
1392 	/*
1393 	 * If LMRC_EXT_CHAIN_SIZE_SUPPORT is set in scratch pad 1, firmware
1394 	 * supports an extended IO chain frame which is 4 times the size of a
1395 	 * legacy firmware frame.
1396 	 * Legacy Firmware frame size is (8 * 128) = 1K
1397 	 * 1M IO Firmware frame size is (8 * 128 * 4) = 4K
1398 	 */
1399 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(1));
1400 	lmrc->l_max_chain_frame_sz = LMRC_MAX_CHAIN_SIZE(reg) *
1401 	    (LMRC_EXT_CHAIN_SIZE_SUPPORT(reg) ? LMRC_1MB_IO : LMRC_256K_IO);
1402 
1403 	/*
1404 	 * Check whether the controller supports DMA to the full 64bit address
1405 	 * space.
1406 	 */
1407 	lmrc->l_64bit_dma_support = LMRC_64BIT_DMA_SUPPORT(reg);
1408 
1409 	/*
1410 	 * We use an I/O frame size of 256 bytes, which is what
1411 	 * LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE is set to.
1412 	 *
1413 	 * The offset of the SGL in the I/O frame is 128, so
1414 	 * there are 128 bytes left for 8 SGEs of 16 bytes each.
1415 	 */
1416 	lmrc->l_max_sge_in_main_msg =
1417 	    (LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1418 	    offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof (Mpi25SGEIOUnion_t);
1419 
1420 	/*
1421 	 * Similarly, number of SGE in a SGE chain frame.
1422 	 */
1423 	lmrc->l_max_sge_in_chain =
1424 	    lmrc->l_max_chain_frame_sz / sizeof (Mpi25SGEIOUnion_t);
1425 
1426 	/*
1427 	 * The total number of SGE we support in a transfer is the sum of
1428 	 * the above two, minus one for the link (last SGE in main msg).
1429 	 *
1430 	 * XXX: So why -2?
1431 	 */
1432 	lmrc->l_max_num_sge =
1433 	    lmrc->l_max_sge_in_main_msg + lmrc->l_max_sge_in_chain - 2;
1434 
1435 	/*
1436 	 * The offset of the last SGE in the I/O request, used for linking
1437 	 * the SGE chain frame if necessary.
1438 	 */
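	/*
	 * With the 256 byte I/O frame and 16 byte SGEs described above, this
	 * works out to (256 - 16) / 16 = 15, i.e. the last SGE slot in the
	 * frame.
	 */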
1439 	lmrc->l_chain_offset_io_request =
1440 	    (LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1441 	    sizeof (Mpi25SGEIOUnion_t)) / sizeof (Mpi25SGEIOUnion_t);
1442 
1443 	/*
1444 	 * For MFI passthru, the link to the SGE chain frame is always
1445 	 * the first SGE in the I/O frame; the other SGEs in the I/O frame
1446 	 * will not be used.
1447 	 */
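	/*
	 * Given the SGL offset of 128 bytes mentioned above and 16 byte SGEs,
	 * this evaluates to 128 / 16 = 8, the very first SGE of the SGL.
	 */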
1448 	lmrc->l_chain_offset_mfi_pthru =
1449 	    offsetof(Mpi25SCSIIORequest_t, SGL) / sizeof (Mpi25SGEIOUnion_t);
1450 
1451 
1452 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(3));
1453 	if (LMRC_NVME_PAGE_SHIFT(reg) > LMRC_DEFAULT_NVME_PAGE_SHIFT) {
1454 		lmrc->l_nvme_page_sz = 1 << LMRC_NVME_PAGE_SHIFT(reg);
1455 		dev_err(lmrc->l_dip, CE_NOTE, "!NVME page size: %ld",
1456 		    lmrc->l_nvme_page_sz);
1457 	}
1458 
1459 	reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(1));
1460 	lmrc->l_fw_sync_cache_support = LMRC_SYNC_CACHE_SUPPORT(reg);
1461 
1462 	if (lmrc->l_class == LMRC_ACLASS_AERO) {
1463 		reg = lmrc_read_reg(lmrc, LMRC_OB_SCRATCH_PAD(1));
1464 		lmrc->l_atomic_desc_support =
1465 		    LMRC_ATOMIC_DESCRIPTOR_SUPPORT(reg);
1466 	}
1467 
1468 	return (DDI_SUCCESS);
1469 }
1470 
1471 /*
1472  * lmrc_ioc_init
1473  *
1474  * Manually build a MFI IOC INIT command to setup basic operating parameters
1475  * such as the DMA parameters for the I/O request frames and the reply post
1476  * queues. Send the IOC INIT command using a special request descriptor which
1477  * directly includes the physical address of the MFI command frame.
1478  *
1479  * After this command completes, the controller is ready to accept MPT commands
1480  * using the normal method of placing them in the I/O request DMA memory and
1481  * writing a MPT request descriptor to the appropriate registers.
1482  */
1483 int
1484 lmrc_ioc_init(lmrc_t *lmrc)
1485 {
1486 	lmrc_mfi_cmd_t *mfi = lmrc_get_mfi(lmrc);
1487 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
1488 	lmrc_mfi_init_payload_t *init = &mfi->mfi_frame->mf_init;
1489 	lmrc_req_desc_t req_desc;
1490 	Mpi2IOCInitRequest_t *IOCInitMsg;
1491 	lmrc_dma_t dma;
1492 	int ret = DDI_SUCCESS;
1493 
1494 	ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr, &dma,
1495 	    sizeof (Mpi2IOCInitRequest_t), 256, DDI_DMA_CONSISTENT);
1496 	if (ret != DDI_SUCCESS) {
1497 		lmrc_put_mfi(mfi);
1498 		dev_err(lmrc->l_dip, CE_WARN,
1499 		    "!%s: failed to allocate IOC command", __func__);
1500 		return (DDI_FAILURE);
1501 	}
1502 
1503 	IOCInitMsg = dma.ld_buf;
1504 	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1505 	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1506 	IOCInitMsg->MsgVersion = MPI2_VERSION;
1507 	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
1508 	IOCInitMsg->SystemRequestFrameSize =
1509 	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1510 	IOCInitMsg->ReplyDescriptorPostQueueDepth = lmrc->l_reply_q_depth;
1511 	lmrc_dma_set_addr64(&lmrc->l_reply_dma,
1512 	    (uint64_t *)&IOCInitMsg->ReplyDescriptorPostQueueAddress);
1513 	lmrc_dma_set_addr64(&lmrc->l_ioreq_dma,
1514 	    (uint64_t *)&IOCInitMsg->SystemRequestFrameBaseAddress);
1515 	IOCInitMsg->HostMSIxVectors = lmrc->l_max_reply_queues;
1516 	/* XXX: Why NVMe? */
1517 	IOCInitMsg->HostPageSize = LMRC_DEFAULT_NVME_PAGE_SHIFT;
1518 
1519 	hdr->mh_cmd = MFI_CMD_INIT;
1520 	hdr->mh_cmd_status = MFI_STAT_INVALID_STATUS;
1521 	hdr->mh_flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1522 
1523 	hdr->mh_drv_opts.mc_support_additional_msix = 1;
1524 	hdr->mh_drv_opts.mc_support_max_255lds = 1;
1525 	hdr->mh_drv_opts.mc_support_ndrive_r1_lb = 1;
1526 	hdr->mh_drv_opts.mc_support_security_protocol_cmds_fw = 1;
1527 	hdr->mh_drv_opts.mc_support_ext_io_size = 1;
1528 
1529 	hdr->mh_data_xfer_len = lmrc_dma_get_size(&dma);
1530 
1531 	lmrc_dma_set_addr64(&dma, &init->mi_queue_info_new_phys_addr);
1532 
1533 	lmrc_dma_set_addr64(&mfi->mfi_frame_dma, &req_desc.rd_reg);
1534 	VERIFY0(req_desc.rd_mfa_io.RequestFlags);
1535 	req_desc.rd_mfa_io.RequestFlags = LMRC_REQ_DESCRIPT_FLAGS_MFA;
1536 
1537 	lmrc_disable_intr(lmrc);
1538 	if (!lmrc_wait_for_reg(lmrc, LMRC_DOORBELL, 1, 0, 10))
1539 		return (DDI_FAILURE);
1540 
1541 	(void) ddi_dma_sync(dma.ld_hdl, 0, dma.ld_len, DDI_DMA_SYNC_FORDEV);
1542 	(void) ddi_dma_sync(mfi->mfi_frame_dma.ld_hdl, 0,
1543 	    mfi->mfi_frame_dma.ld_len, DDI_DMA_SYNC_FORDEV);
1544 
1545 	lmrc_send_request(lmrc, req_desc);
1546 
1547 	mutex_enter(&mfi->mfi_lock);
1548 	ret = lmrc_poll_mfi(lmrc, mfi, LMRC_INTERNAL_CMD_WAIT_TIME);
1549 	mutex_exit(&mfi->mfi_lock);
1550 
1551 	if (ret != DDI_SUCCESS) {
1552 		if (hdr->mh_cmd_status != MFI_STAT_INVALID_STATUS)
1553 			dev_err(lmrc->l_dip, CE_WARN,
1554 			    "!IOC Init failed, status = 0x%x",
1555 			    hdr->mh_cmd_status);
1556 	}
1557 
1558 	lmrc_dma_free(&dma);
1559 	lmrc_put_mfi(mfi);
1560 
1561 	return (ret);
1562 }
1563 
1564 /*
1565  * lmrc_get_ctrl_info
1566  *
1567  * Build a MFI DCMD to get controller information from FW. Update the copy in
1568  * the soft state.
1569  */
1570 static int
1571 lmrc_get_ctrl_info(lmrc_t *lmrc)
1572 {
1573 	lmrc_ctrl_info_t *ci = lmrc->l_ctrl_info;
1574 	lmrc_mfi_cmd_t *mfi;
1575 	int ret;
1576 
1577 	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, LMRC_DCMD_CTRL_GET_INFO,
1578 	    sizeof (lmrc_ctrl_info_t), 1);
1579 
1580 	if (mfi == NULL)
1581 		return (DDI_FAILURE);
1582 
1583 	ret = lmrc_issue_blocked_mfi(lmrc, mfi);
1584 
1585 	if (ret != DDI_SUCCESS)
1586 		goto out;
1587 
1588 	(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
1589 	    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORKERNEL);
1590 	bcopy(mfi->mfi_data_dma.ld_buf, ci, sizeof (lmrc_ctrl_info_t));
1591 
1592 out:
1593 	lmrc_put_dcmd(lmrc, mfi);
1594 	return (ret);
1595 }
1596 
1597 /*
1598  * lmrc_fw_init
1599  *
1600  * Complete firmware initialization. At this point, we can already send MFI
1601  * commands, so we can start by getting the controller information from the
1602  * firmware and setting up things in our soft state. Next we issue the commands
1603  * to get the PD map and RAID map, which will complete asynchronously when
1604  * new information is available and then re-send themselves.
1605  */
1606 int
1607 lmrc_fw_init(lmrc_t *lmrc)
1608 {
1609 	int drv_max_lds = LMRC_MAX_LOGICAL_DRIVES;
1610 	lmrc_ctrl_info_t *ci = lmrc->l_ctrl_info;
1611 	int ret;
1612 
1613 	ret = lmrc_get_ctrl_info(lmrc);
1614 	if (ret != DDI_SUCCESS) {
1615 		dev_err(lmrc->l_dip, CE_WARN, "!Unable to get FW ctrl info.");
1616 		return (DDI_FAILURE);
1617 	}
1618 
1619 	lmrc->l_disable_online_ctrl_reset =
1620 	    ci->ci_prop.cp_disable_online_ctrl_reset == 1;
1621 
1622 	lmrc->l_max_256_vd_support =
1623 	    ci->ci_adapter_opts3.ao3_support_max_ext_lds == 1;
1624 
1625 	if (ci->ci_max_lds > 64) {
1626 		lmrc->l_max_256_vd_support = B_TRUE;
1627 		drv_max_lds = LMRC_MAX_LOGICAL_DRIVES_EXT;
1628 	}
1629 
1630 	lmrc->l_fw_supported_vd_count = min(ci->ci_max_lds, drv_max_lds);
1631 
1632 	lmrc->l_fw_supported_pd_count = min(ci->ci_max_pds, LMRC_MAX_PHYS_DEV);
1633 
1634 	lmrc->l_max_map_sz = lmrc->l_current_map_sz =
1635 	    lmrc->l_max_raid_map_sz * LMRC_MIN_MAP_SIZE;
1636 
1637 	lmrc->l_use_seqnum_jbod_fp =
1638 	    ci->ci_adapter_opts3.ao3_use_seq_num_jbod_FP != 0;
1639 
1640 	lmrc->l_pdmap_tgtid_support =
1641 	    ci->ci_adapter_opts4.ao4_support_pd_map_target_id != 0;
1642 
1643 	return (DDI_SUCCESS);
1644 }
1645 
1646 
1647 /*
1648  * lmrc_ctrl_shutdown
1649  *
1650  * Called by lmrc_quiesce() to send a shutdown command to the controller.
1651  * Cannot use locks, therefore cannot use lmrc_get_dcmd() or lmrc_get_mfi().
1652  */
1653 int
1654 lmrc_ctrl_shutdown(lmrc_t *lmrc)
1655 {
1656 	lmrc_mfi_cmd_t *mfi = list_remove_head(&lmrc->l_mfi_cmd_list);
1657 	lmrc_mfi_header_t *hdr;
1658 	lmrc_mfi_dcmd_payload_t *dcmd;
1659 
1660 	if (mfi == NULL)
1661 		return (DDI_FAILURE);
1662 
1663 	hdr = &mfi->mfi_frame->mf_hdr;
1664 	dcmd = &mfi->mfi_frame->mf_dcmd;
1665 
1666 	hdr->mh_cmd = MFI_CMD_DCMD;
1667 	hdr->mh_flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1668 	dcmd->md_opcode = LMRC_DCMD_CTRL_SHUTDOWN;
1669 
1670 	lmrc_disable_intr(lmrc);
1671 	lmrc_issue_mfi(lmrc, mfi, NULL);
1672 
1673 	return (DDI_SUCCESS);
1674 }
1675 
1676 /*
1677  * driver target state management
1678  *
1679  * The soft state of the controller instance keeps a pre-allocated array of
1680  * target structures for all possible targets, even though only a small number
1681  * of them are likely to be used. Each target structure contains a back link to
1682  * the soft state and a mutex, which are never cleared or changed when a target
1683  * is added or removed.
1684  */
1685 
1686 /*
1687  * lmrc_tgt_init
1688  *
1689  * Initialize the tgt structure for a newly discovered tgt. The same tgt
1690  * structure is used for PDs and LDs; the distinction can be made by the
1691  * presence or absence of tgt_pd_info. LDs are always of type disk, the type
1692  * of PDs is taken from their pd_info. If a device has no SAS WWN, we'll fake
1693  * the interconnect type to be PARALLEL to make sure the device address isn't
1694  * misunderstood as a WWN by devfsadm.
1695  */
1696 void
1697 lmrc_tgt_init(lmrc_tgt_t *tgt, uint16_t dev_id, char *addr,
1698     lmrc_pd_info_t *pd_info)
1699 {
1700 	rw_enter(&tgt->tgt_lock, RW_WRITER);
1701 
1702 	bzero(&tgt->tgt_dev_id,
1703 	    sizeof (lmrc_tgt_t) - offsetof(lmrc_tgt_t, tgt_dev_id));
1704 
1705 	tgt->tgt_dev_id = dev_id;
1706 	tgt->tgt_pd_info = pd_info;
1707 	tgt->tgt_interconnect_type = INTERCONNECT_SAS;
1708 
1709 	if (pd_info == NULL) {
1710 		tgt->tgt_type = DTYPE_DIRECT;
1711 	} else {
1712 		tgt->tgt_type = pd_info->pd_scsi_dev_type;
1713 	}
1714 
1715 	(void) strlcpy(tgt->tgt_wwnstr, addr, sizeof (tgt->tgt_wwnstr));
1716 	if (scsi_wwnstr_to_wwn(tgt->tgt_wwnstr, &tgt->tgt_wwn) != DDI_SUCCESS) {
1717 		tgt->tgt_interconnect_type = INTERCONNECT_PARALLEL;
1718 		tgt->tgt_wwn = dev_id;
1719 	}
1720 
1721 	rw_exit(&tgt->tgt_lock);
1722 }
1723 
1724 /*
1725  * lmrc_tgt_clear
1726  *
1727  * Reset the tgt structure of a target which is no longer present.
1728  */
1729 void
1730 lmrc_tgt_clear(lmrc_tgt_t *tgt)
1731 {
1732 	rw_enter(&tgt->tgt_lock, RW_WRITER);
1733 
1734 	if (tgt->tgt_pd_info != NULL)
1735 		kmem_free(tgt->tgt_pd_info, sizeof (lmrc_pd_info_t));
1736 
1737 	bzero(&tgt->tgt_dev_id,
1738 	    sizeof (lmrc_tgt_t) - offsetof(lmrc_tgt_t, tgt_dev_id));
1739 	tgt->tgt_dev_id = LMRC_DEVHDL_INVALID;
1740 	rw_exit(&tgt->tgt_lock);
1741 }
1742 
1743 /*
1744  * lmrc_tgt_find
1745  *
1746  * Walk the target list and find a tgt matching the given scsi_device.
1747  * Return the tgt read-locked. The targets_lock mutex must be held the
1748  * whole time.
1749  */
1750 lmrc_tgt_t *
1751 lmrc_tgt_find(lmrc_t *lmrc, struct scsi_device *sd)
1752 {
1753 	const char *ua = scsi_device_unit_address(sd);
1754 	char *comma, wwnstr[SCSI_WWN_BUFLEN];
1755 	uint64_t wwn;
1756 	unsigned long tgtid;
1757 	lmrc_tgt_t *tgt;
1758 	size_t i;
1759 
1760 	VERIFY(ua != NULL);
1761 
1762 	(void) strlcpy(wwnstr, ua, sizeof (wwnstr));
1763 
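	/*
	 * Unit addresses here look like "2,0" for a VD with target ID 2 or
	 * "w5000c50012345678,0" for a WWN-addressed device; both example
	 * addresses are made up purely for illustration.
	 */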
1764 	/*
1765 	 * If the unit address is a valid target ID and within range for
1766 	 * VD IDs, use that.
1767 	 */
1768 	if (ddi_strtoul(wwnstr, &comma, 10, &tgtid) == 0 &&
1769 	    *comma == ',' &&
1770 	    tgtid <= lmrc->l_fw_supported_vd_count) {
1771 		tgt = &lmrc->l_targets[tgtid];
1772 
1773 		rw_enter(&tgt->tgt_lock, RW_READER);
1774 		if (tgt->tgt_dev_id == tgtid &&
1775 		    tgt->tgt_wwn == tgtid) {
1776 			return (tgt);
1777 		}
1778 		rw_exit(&tgt->tgt_lock);
1779 	}
1780 
1781 	/* Chop off ",lun" as scsi_wwnstr_to_wwn() can't handle it. */
1782 	comma = strchr(wwnstr, ',');
1783 	if (comma != NULL)
1784 		*comma = '\0';
1785 
1786 	/* Else, if unit address is a valid WWN, look for that. */
1787 	if (scsi_wwnstr_to_wwn(wwnstr, &wwn) == DDI_SUCCESS) {
1788 		for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++) {
1789 			tgt = &lmrc->l_targets[i];
1790 
1791 			rw_enter(&tgt->tgt_lock, RW_READER);
1792 			if (tgt->tgt_wwn == wwn) {
1793 				return (tgt);
1794 			}
1795 			rw_exit(&tgt->tgt_lock);
1796 		}
1797 	} else {
1798 		/* Do it the hard way and compare wwnstr. */
1799 		for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++) {
1800 			tgt = &lmrc->l_targets[i];
1801 
1802 			rw_enter(&tgt->tgt_lock, RW_READER);
1803 			if (strcmp(tgt->tgt_wwnstr, wwnstr) == 0) {
1804 				return (tgt);
1805 			}
1806 			rw_exit(&tgt->tgt_lock);
1807 		}
1808 	}
1809 
1810 	return (NULL);
1811 }
1812 
1813 /*
1814  * MPT/MFI command management
1815  *
1816  * For each kind of command, MFI and MPT, the driver keeps an array of pre-
1817  * allocated and pre-initialized commands. Additionally, it keeps two lists of
1818  * currently unused commands. A set of functions is provided for each list to
1819  * get and put commands from/to the list. Commands are initialized during get(),
1820  * because having completed commands on the list can help in certain cases
1821  * during debugging.
1822  *
1823  * MPT commands in use for I/O are kept on a active command list of the target
1824  * they are addressing. All other types of commands are not kept on any list
1825  * while they are being processed by the hardware. When walking the command
1826  * arrays, busy commands not associated with a target can be distinguished by
1827  * not being linked on any list.
1828  */
1829 
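/*
 * A minimal sketch of the life cycle of a blocking MFI command, assuming the
 * caller can sleep (error handling omitted):
 *
 *	lmrc_mfi_cmd_t *mfi = lmrc_get_mfi(lmrc);
 *
 *	... fill in mfi->mfi_frame ...
 *
 *	if (lmrc_issue_blocked_mfi(lmrc, mfi) == DDI_SUCCESS)
 *		... evaluate mfi->mfi_frame->mf_hdr.mh_cmd_status ...
 *
 *	lmrc_put_mfi(mfi);
 */
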
1830 /*
1831  * lmrc_get_mpt
1832  *
1833  * Get an MPT command from the list and initialize it. Return it locked.
1834  */
1835 lmrc_mpt_cmd_t *
1836 lmrc_get_mpt(lmrc_t *lmrc)
1837 {
1838 	lmrc_mpt_cmd_t *mpt;
1839 	Mpi25SCSIIORequest_t *io_req;
1840 
1841 	mutex_enter(&lmrc->l_mpt_cmd_lock);
1842 	mpt = list_remove_head(&lmrc->l_mpt_cmd_list);
1843 	mutex_exit(&lmrc->l_mpt_cmd_lock);
1844 	VERIFY(mpt != NULL);
1845 
1846 	mutex_enter(&mpt->mpt_lock);
1847 	bzero(mpt->mpt_io_frame, LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
1848 	bzero(mpt->mpt_chain_dma.ld_buf, mpt->mpt_chain_dma.ld_len);
1849 	bzero(mpt->mpt_sense_dma.ld_buf, mpt->mpt_sense_dma.ld_len);
1850 
1851 	mpt->mpt_mfi = NULL;
1852 	mpt->mpt_pkt = NULL;
1853 
1854 	/* Set the offset of the SGL entries inside the MPT command. */
1855 	io_req = mpt->mpt_io_frame;
1856 	io_req->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
1857 
1858 	mpt->mpt_complete = B_FALSE;
1859 	cv_init(&mpt->mpt_cv, NULL, CV_DRIVER, NULL);
1860 
1861 	return (mpt);
1862 }
1863 
1864 /*
1865  * lmrc_put_mpt
1866  *
1867  * Put an MPT command back on the list. Destroy the CV, thereby
1868  * asserting that no one is waiting on it.
1869  */
1870 void
1871 lmrc_put_mpt(lmrc_mpt_cmd_t *mpt)
1872 {
1873 	lmrc_t *lmrc = mpt->mpt_lmrc;
1874 
1875 	VERIFY(lmrc != NULL);
1876 
1877 	ASSERT0(list_link_active(&mpt->mpt_node));
1878 	ASSERT(mutex_owned(&mpt->mpt_lock));
1879 	cv_destroy(&mpt->mpt_cv);
1880 
1881 	mutex_enter(&lmrc->l_mpt_cmd_lock);
1882 	list_insert_tail(&lmrc->l_mpt_cmd_list, mpt);
1883 	mutex_exit(&lmrc->l_mpt_cmd_lock);
1884 	mutex_exit(&mpt->mpt_lock);
1885 }
1886 
1887 /*
1888  * lmrc_get_mfi
1889  *
1890  * Get an MFI command from the list and initialize it.
1891  */
1892 lmrc_mfi_cmd_t *
1893 lmrc_get_mfi(lmrc_t *lmrc)
1894 {
1895 	lmrc_mfi_cmd_t *mfi;
1896 
1897 	mutex_enter(&lmrc->l_mfi_cmd_lock);
1898 	mfi = list_remove_head(&lmrc->l_mfi_cmd_list);
1899 	mutex_exit(&lmrc->l_mfi_cmd_lock);
1900 	VERIFY(mfi != NULL);
1901 
1902 	mutex_enter(&mfi->mfi_lock);
1903 	bzero(mfi->mfi_frame, sizeof (lmrc_mfi_frame_t));
1904 	mfi->mfi_frame->mf_hdr.mh_context = mfi->mfi_idx;
1905 	mfi->mfi_callback = NULL;
1906 	mfi->mfi_mpt = NULL;
1907 
1908 	cv_init(&mfi->mfi_cv, NULL, CV_DRIVER, NULL);
1909 	mutex_exit(&mfi->mfi_lock);
1910 
1911 	return (mfi);
1912 }
1913 
1914 /*
1915  * lmrc_put_mfi
1916  *
1917  * Put an MFI command back on the list. Destroy the CV, thereby
1918  * asserting that no one is waiting on it.
1919  */
1920 void
1921 lmrc_put_mfi(lmrc_mfi_cmd_t *mfi)
1922 {
1923 	lmrc_t *lmrc = mfi->mfi_lmrc;
1924 
1925 	VERIFY(lmrc != NULL);
1926 
1927 	ASSERT0(list_link_active(&mfi->mfi_node));
1928 
1929 	mutex_enter(&mfi->mfi_lock);
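	/*
	 * If an MPT command is still associated with this MFI command,
	 * return it to its free list as well; note that lmrc_put_mpt()
	 * drops mpt_lock on our behalf.
	 */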
1930 	if (mfi->mfi_mpt != NULL) {
1931 		mutex_enter(&mfi->mfi_mpt->mpt_lock);
1932 		lmrc_put_mpt(mfi->mfi_mpt);
1933 	}
1934 
1935 	cv_destroy(&mfi->mfi_cv);
1936 
1937 	mutex_enter(&lmrc->l_mfi_cmd_lock);
1938 	list_insert_tail(&lmrc->l_mfi_cmd_list, mfi);
1939 	mutex_exit(&lmrc->l_mfi_cmd_lock);
1940 	mutex_exit(&mfi->mfi_lock);
1941 }
1942 
1943 /*
1944  * lmrc_abort_outstanding_mfi
1945  *
1946  * Walk the MFI cmd array and abort each command that is still outstanding,
1947  * as indicated by it not being linked on l_mfi_cmd_list.
1948  *
1949  * As a special case, if the FW is in a fault state, just call each command's
1950  * completion callback.
1951  */
1952 int
1953 lmrc_abort_outstanding_mfi(lmrc_t *lmrc, const size_t ncmd)
1954 {
1955 	int ret;
1956 	int i;
1957 
1958 	for (i = 0; i < ncmd; i++) {
1959 		lmrc_mfi_cmd_t *mfi = lmrc->l_mfi_cmds[i];
1960 
1961 		mutex_enter(&mfi->mfi_lock);
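		/* Commands linked on the free list are not outstanding. */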
1962 		if (list_link_active(&mfi->mfi_node)) {
1963 			mutex_exit(&mfi->mfi_lock);
1964 			continue;
1965 		}
1966 
1967 		/*
1968 		 * If the FW is faulted, wake up anyone waiting on the command
1969 		 * to clean it up.
1970 		 */
1971 		if (lmrc->l_fw_fault) {
1972 			if (mfi->mfi_callback != NULL)
1973 				mfi->mfi_callback(lmrc, mfi);
1974 			mutex_exit(&mfi->mfi_lock);
1975 			continue;
1976 		}
1977 
1978 		ret = lmrc_abort_cmd(lmrc, mfi);
1979 		mutex_exit(&mfi->mfi_lock);
1980 		if (ret != DDI_SUCCESS)
1981 			return (ret);
1982 
1983 		lmrc_dma_free(&mfi->mfi_data_dma);
1984 		lmrc_put_mfi(mfi);
1985 	}
1986 
1987 	return (DDI_SUCCESS);
1988 }
1989 
1990 /*
1991  * lmrc_get_dcmd
1992  *
1993  * Build an MFI DCMD with DMA memory for data transfers (usage sketch below).
1994  */
1995 lmrc_mfi_cmd_t *
1996 lmrc_get_dcmd(lmrc_t *lmrc, uint16_t flags, uint32_t opcode, uint32_t xferlen,
1997     uint_t align)
1998 {
1999 	lmrc_mfi_cmd_t *mfi = lmrc_get_mfi(lmrc);
2000 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
2001 	lmrc_mfi_dcmd_payload_t *dcmd = &mfi->mfi_frame->mf_dcmd;
2002 	lmrc_dma_t *dma = &mfi->mfi_data_dma;
2003 	int ret;
2004 
2005 	hdr->mh_cmd = MFI_CMD_DCMD;
2006 	hdr->mh_flags = flags;
2007 
2008 	dcmd->md_opcode = opcode;
2009 
2010 	if ((flags & MFI_FRAME_DIR_READ) != 0 ||
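	/*
	 * Only allocate DMA memory and set up an SGL if the caller asked for
	 * a data transfer in either direction.
	 */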
2011 	    (flags & MFI_FRAME_DIR_WRITE) != 0) {
2012 		ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr, dma, xferlen,
2013 		    align, DDI_DMA_CONSISTENT);
2014 		if (ret != DDI_SUCCESS) {
2015 			lmrc_put_mfi(mfi);
2016 			return (NULL);
2017 		}
2018 
2019 		hdr->mh_flags |= MFI_FRAME_SGL64;
2020 		hdr->mh_sge_count = 1;
2021 		hdr->mh_data_xfer_len = lmrc_dma_get_size(dma);
2022 
2023 		dcmd->md_sgl.ms64_length = lmrc_dma_get_size(dma);
2024 		lmrc_dma_set_addr64(dma, &dcmd->md_sgl.ms64_phys_addr);
2025 	}
2026 
2027 	return (mfi);
2028 }
2029 
2030 /*
2031  * lmrc_put_dcmd
2032  *
2033  * Free the DMA memory of an MFI DCMD and return the command to the list.
2034  */
2035 void
2036 lmrc_put_dcmd(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
2037 {
2038 	lmrc_dma_free(&mfi->mfi_data_dma);
2039 	lmrc_put_mfi(mfi);
2040 }
2041 
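/*
 * A minimal sketch of how the DCMD helpers above are typically combined for a
 * blocking read-style command (opcode, xferlen and buf are placeholders,
 * error handling abbreviated); lmrc_get_event_log_info() below is an instance
 * of this pattern:
 *
 *	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, opcode, xferlen, 1);
 *	if (mfi == NULL)
 *		return (DDI_FAILURE);
 *	if (lmrc_issue_blocked_mfi(lmrc, mfi) == DDI_SUCCESS)
 *		bcopy(mfi->mfi_data_dma.ld_buf, buf, xferlen);
 *	lmrc_put_dcmd(lmrc, mfi);
 */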
2042 
2043 /*
2044  * Asynchronous Event Notifications
2045  */
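/*
 * AEN processing forms a loop: lmrc_start_aen() reads the current event log
 * sequence number from the firmware and registers an event wait DCMD for the
 * next sequence number (lmrc_register_aen()). When the firmware completes
 * that command, lmrc_complete_aen() dispatches lmrc_aen_handler() on the
 * taskq, which acts on the event and then re-issues the same command with an
 * increased sequence number.
 */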
2046 /*
2047  * lmrc_get_event_log_info
2048  *
2049  * Get the Event Log Info from the firmware.
2050  */
2051 static int
2052 lmrc_get_event_log_info(lmrc_t *lmrc, lmrc_evt_log_info_t *eli)
2053 {
2054 	lmrc_mfi_cmd_t *mfi;
2055 	int ret;
2056 
2057 	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ,
2058 	    LMRC_DCMD_CTRL_EVENT_GET_INFO, sizeof (lmrc_evt_log_info_t), 1);
2059 
2060 	if (mfi == NULL)
2061 		return (DDI_FAILURE);
2062 
2063 	ret = lmrc_issue_blocked_mfi(lmrc, mfi);
2064 
2065 	if (ret != DDI_SUCCESS)
2066 		goto out;
2067 
2068 	bcopy(mfi->mfi_data_dma.ld_buf, eli, sizeof (lmrc_evt_log_info_t));
2069 
2070 out:
2071 	lmrc_put_dcmd(lmrc, mfi);
2072 	return (ret);
2073 }
2074 
2075 /*
2076  * lmrc_aen_handler
2077  *
2078  * Check the event code and handle it as needed. In the case of PD- or
2079  * LD-related events, invoke their respective handlers.
2080  */
2081 static void
2082 lmrc_aen_handler(void *arg)
2083 {
2084 	lmrc_mfi_cmd_t *mfi = arg;
2085 	lmrc_t *lmrc = mfi->mfi_lmrc;
2086 	lmrc_evt_t *evt = mfi->mfi_data_dma.ld_buf;
2087 	lmrc_mfi_dcmd_payload_t *dcmd = &mfi->mfi_frame->mf_dcmd;
2088 	int ret = DDI_FAILURE;
2089 
2090 	/* Controller & Configuration specific events */
2091 	switch (evt->evt_code) {
2092 	case LMRC_EVT_CFG_CLEARED:
2093 	case LMRC_EVT_CTRL_HOST_BUS_SCAN_REQD:
2094 	case LMRC_EVT_FOREIGN_CFG_IMPORTED:
2095 		ret = lmrc_get_pd_list(lmrc);
2096 		if (ret != DDI_SUCCESS)
2097 			break;
2098 
2099 		ret = lmrc_get_ld_list(lmrc);
2100 		break;
2101 
2102 	case LMRC_EVT_CTRL_PROP_CHANGED:
2103 		ret = lmrc_get_ctrl_info(lmrc);
2104 		break;
2105 
2106 	case LMRC_EVT_CTRL_PATROL_READ_START:
2107 	case LMRC_EVT_CTRL_PATROL_READ_RESUMED:
2108 	case LMRC_EVT_CTRL_PATROL_READ_COMPLETE:
2109 	case LMRC_EVT_CTRL_PATROL_READ_CANT_START:
2110 	case LMRC_EVT_CTRL_PERF_COLLECTION:
2111 	case LMRC_EVT_CTRL_BOOTDEV_SET:
2112 	case LMRC_EVT_CTRL_BOOTDEV_RESET:
2113 	case LMRC_EVT_CTRL_PERSONALITY_CHANGE:
2114 	case LMRC_EVT_CTRL_PERSONALITY_CHANGE_PEND:
2115 	case LMRC_EVT_CTRL_NR_OF_VALID_SNAPDUMP:
2116 		break;
2117 
2118 	default:
2119 		/* LD-specific events */
2120 		if ((evt->evt_locale & LMRC_EVT_LOCALE_LD) != 0)
2121 			ret = lmrc_raid_aen_handler(lmrc, evt);
2122 
2123 		/* PD-specific events */
2124 		else if ((evt->evt_locale & LMRC_EVT_LOCALE_PD) != 0)
2125 			ret = lmrc_phys_aen_handler(lmrc, evt);
2126 
2127 		if (ret != DDI_SUCCESS) {
2128 			dev_err(lmrc->l_dip, CE_NOTE, "!unknown AEN received, "
2129 			    "seqnum = %d, timestamp = %d, code = %x, "
2130 			    "locale = %x, class = %d, argtype = %d",
2131 			    evt->evt_seqnum, evt->evt_timestamp, evt->evt_code,
2132 			    evt->evt_locale, evt->evt_class, evt->evt_argtype);
2133 		}
2134 	}
2135 
2136 	dev_err(lmrc->l_dip, CE_NOTE, "!%s", evt->evt_descr);
2137 
2138 	/*
2139 	 * Just reuse the command in its entirety. Increase the sequence
2140 	 * number.
2141 	 */
2142 	dcmd->md_mbox_32[0] = evt->evt_seqnum + 1;
2143 	mutex_enter(&mfi->mfi_lock);
2144 	mutex_enter(&mfi->mfi_mpt->mpt_lock);
2145 	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_aen);
2146 	mutex_exit(&mfi->mfi_mpt->mpt_lock);
2147 	mutex_exit(&mfi->mfi_lock);
2148 }
2149 
2150 /*
2151  * lmrc_complete_aen
2152  *
2153  * An AEN was received, so dispatch a taskq job to process it.
2154  */
2155 static void
2156 lmrc_complete_aen(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
2157 {
2158 	lmrc_mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
2159 
2160 	ASSERT(mutex_owned(&mfi->mfi_lock));
2161 
2162 	if (hdr->mh_cmd_status != MFI_STAT_OK) {
2163 		/* Was the command aborted? */
2164 		if (hdr->mh_cmd_status == MFI_STAT_NOT_FOUND)
2165 			return;
2166 
2167 		dev_err(lmrc->l_dip, CE_WARN,
2168 		    "!AEN failed, status = %d",
2169 		    hdr->mh_cmd_status);
2170 		taskq_dispatch_ent(lmrc->l_taskq, (task_func_t *)lmrc_put_mfi,
2171 		    mfi, TQ_NOSLEEP, &mfi->mfi_tqent);
2172 		return;
2173 	}
2174 
2175 	taskq_dispatch_ent(lmrc->l_taskq, lmrc_aen_handler, mfi, TQ_NOSLEEP,
2176 	    &mfi->mfi_tqent);
2177 }
2178 
2179 /*
2180  * lmrc_register_aen
2181  *
2182  * In FreeBSD, this function checks for an existing AEN. If its class and
2183  * locale already include what is requested here, it just returns. Otherwise,
2184  * the existing AEN is aborted and a new one is created which includes both
2185  * the previous and the newly requested locale and class.
2186  *
2187  * Given that the driver (same as in FreeBSD) calls this function during attach
2188  * to create an AEN with LOCALE_ALL and CLASS_DEBUG, all of this would be dead
2189  * code anyway.
2190  */
2191 static int
2192 lmrc_register_aen(lmrc_t *lmrc, uint32_t seqnum)
2193 {
2194 	lmrc_evt_class_locale_t ecl = {
2195 		.ecl_class = LMRC_EVT_CLASS_DEBUG,
2196 		.ecl_locale = LMRC_EVT_LOCALE_ALL
2197 	};
2198 
2199 	lmrc_mfi_cmd_t *mfi;
2200 	lmrc_mfi_dcmd_payload_t *dcmd;
2201 
2202 	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, LMRC_DCMD_CTRL_EVENT_WAIT,
2203 	    sizeof (lmrc_evt_t), 1);
2204 
2205 	if (mfi == NULL)
2206 		return (DDI_FAILURE);
2207 
2208 	dcmd = &mfi->mfi_frame->mf_dcmd;
2209 	dcmd->md_mbox_32[0] = seqnum;
2210 	dcmd->md_mbox_32[1] = ecl.ecl_word;
2211 
2212 	mutex_enter(&mfi->mfi_lock);
2213 	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_aen);
2214 	mutex_exit(&mfi->mfi_lock);
2215 
2216 	return (DDI_SUCCESS);
2217 }
2218 
2219 /*
2220  * lmrc_start_aen
2221  *
2222  * Set up and enable AEN processing.
2223  */
2224 int
2225 lmrc_start_aen(lmrc_t *lmrc)
2226 {
2227 	lmrc_evt_log_info_t eli;
2228 	int ret;
2229 
2230 	bzero(&eli, sizeof (eli));
2231 
2232 	/* Get the latest sequence number from the Event Log Info. */
2233 	ret = lmrc_get_event_log_info(lmrc, &eli);
2234 	if (ret != DDI_SUCCESS)
2235 		return (ret);
2236 
2237 	/* Register AEN with FW for latest sequence number + 1. */
2238 	ret = lmrc_register_aen(lmrc, eli.eli_newest_seqnum + 1);
2239 	return (ret);
2240 }
2241