/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else {
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
		}
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * this error is handled elsewhere. If not,
				 * it must be handled here. This is probably
				 * an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out to
 * the SCSI stack. For commands originating from the SCSI stack (cmd->scp
 * populated), translates the results and invokes the midlayer done routine;
 * internal commands signal their completion event instead.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else {
		complete(&cmd->cevent);
	}
}

/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
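	/*
	 * The AFU is expected to clear the reset-in-progress bit once the
	 * context reset completes; poll for that, backing off between reads.
	 */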
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	context_reset(cmd, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	context_reset(cmd, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
	       readq_be(&hwq->host_map->sq_head),
	       readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
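		/*
		 * Lockless round-robin: a race on the rolling counter only
		 * perturbs the queue distribution, which is harmless here.
		 */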
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

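	/*
	 * A single scatter/gather element describes the entire transfer;
	 * this relies on the host template advertising a one-entry sg
	 * table, with the AFU taking one effective address and length
	 * per request.
	 */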
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to time out, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int k;

	if (!afu) {
		dev_dbg(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
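			/* Linear backoff between polls: 100ms, 200ms, ... */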
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
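		/*
		 * An all-ones read typically means the MMIO space is no
		 * longer available (e.g. a frozen PCI channel); cut the
		 * remaining retries short in that case.
		 */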
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
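		/* As above, shorten the retries on an all-ones read */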
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* First switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* Switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/*
	 * Sync interrupts for master's IOARRIN write. Note that unlike
	 * asyncs, there can be no pending sync interrupts at this time
	 * (this is a fresh context and master has not written IOARRIN
	 * yet), so there is nothing to clear.
	 */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue whose RRQ is to be processed.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
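		/*
		 * An entry is ready when its toggle bit matches the HWQ's
		 * current sense; the AFU alternates the bit it writes on
		 * each pass through the queue and the consumer mirrors
		 * that on wrap below.
		 */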
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

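	/* A budget of -1 drains every ready entry in a single pass */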
	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

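/*
 * The seemingly odd port order (1, 0, 3, 2) follows the bit layout of
 * the SISL_ASTATUS register; per the NOTE above, this array is indexed
 * by bit position.
 */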
static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;

	rc = cxl_start_context(hwq->ctx,
			       hwq->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
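		/* WWPN_LEN (16) conveniently doubles as the hex base here */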
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/*
		 * Disrupt any clients that could be running,
		 * e.g. clients that survived a master restart.
		 */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/*
	 * AFU configuration: enable all auto retry options and control
	 * endianness. Leave others at default (CTX_CAP write protected,
	 * mbox_r does not clear on read, and checker on if dual AFU).
	 */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
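		/* Leaving num_ports at 0 skips the per-port setup loop below */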
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/*
		 * Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/*
	 * Set up master's own CTX_CAP to allow real mode, host translation
	 * tables, AFU cmds and read/write GSCSI cmds. First, unlock ctx_cap
	 * write by reading mbox.
	 */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			&hwq->ctrl_map->ctx_cap);
	}

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;
		spin_lock_init(&hwq->hrrq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			spin_lock_init(&hwq->hsq_slock);
			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: undo level to apply should an error occur, UNDO_NOOP on success
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	struct cxl_context *ctx = hwq->ctx;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
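	/* Only the primary HWQ fields the async error interrupt (IRQ 3) */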
1766 	int num_irqs = is_primary_hwq ? 3 : 2;
1767 
1768 	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
1769 	if (unlikely(rc)) {
1770 		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1771 			__func__, rc);
1772 		level = UNDO_NOOP;
1773 		goto out;
1774 	}
1775 
1776 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1777 			     "SISL_MSI_SYNC_ERROR");
1778 	if (unlikely(rc <= 0)) {
1779 		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1780 		level = FREE_IRQ;
1781 		goto out;
1782 	}
1783 
1784 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1785 			     "SISL_MSI_RRQ_UPDATED");
1786 	if (unlikely(rc <= 0)) {
1787 		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1788 		level = UNMAP_ONE;
1789 		goto out;
1790 	}
1791 
1792 	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1793 	if (!is_primary_hwq)
1794 		goto out;
1795 
1796 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1797 			     "SISL_MSI_ASYNC_ERROR");
1798 	if (unlikely(rc <= 0)) {
1799 		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1800 		level = UNMAP_TWO;
1801 		goto out;
1802 	}
1803 out:
1804 	return level;
1805 }
1806 
1807 /**
1808  * init_mc() - create and register as the master context
1809  * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
1811  *
1812  * Return: 0 on success, -errno on failure
1813  */
1814 static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1815 {
1816 	struct cxl_context *ctx;
1817 	struct device *dev = &cfg->dev->dev;
1818 	struct hwq *hwq = get_hwq(cfg->afu, index);
1819 	int rc = 0;
1820 	enum undo_level level;
1821 
1822 	hwq->afu = cfg->afu;
1823 	hwq->index = index;
1824 
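	/*
	 * The primary HWQ uses the default context the cxl driver created
	 * for this device; secondary HWQs initialize a context of their own.
	 */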
1825 	if (index == PRIMARY_HWQ)
1826 		ctx = cxl_get_context(cfg->dev);
1827 	else
1828 		ctx = cxl_dev_context_init(cfg->dev);
	/*
	 * cxl_dev_context_init() returns an ERR_PTR on failure, whereas
	 * cxl_get_context() returns NULL; guard against both.
	 */
	if (unlikely(IS_ERR_OR_NULL(ctx))) {
		rc = -ENOMEM;
		goto err1;
	}
1833 
1834 	WARN_ON(hwq->ctx);
1835 	hwq->ctx = ctx;
1836 
1837 	/* Set it up as a master with the CXL */
1838 	cxl_set_master(ctx);
1839 
1840 	/* Reset AFU when initializing primary context */
1841 	if (index == PRIMARY_HWQ) {
1842 		rc = cxl_afu_reset(ctx);
1843 		if (unlikely(rc)) {
1844 			dev_err(dev, "%s: AFU reset failed rc=%d\n",
1845 				      __func__, rc);
1846 			goto err1;
1847 		}
1848 	}
1849 
	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		rc = -ENODEV;
		dev_err(dev, "%s: interrupt init failed level=%d\n",
			__func__, level);
		goto err2;
	}
1855 
	/*
	 * This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx).
	 */
1860 	rc = start_context(cfg, index);
1861 	if (unlikely(rc)) {
1862 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1863 		level = UNMAP_THREE;
1864 		goto err2;
1865 	}
1866 
1867 out:
1868 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1869 	return rc;
1870 err2:
1871 	term_intr(cfg, level, index);
1872 	if (index != PRIMARY_HWQ)
1873 		cxl_release_context(ctx);
1874 err1:
1875 	hwq->ctx = NULL;
1876 	goto out;
1877 }
1878 
1879 /**
1880  * get_num_afu_ports() - determines and configures the number of AFU ports
1881  * @cfg:	Internal structure associated with the host.
1882  *
1883  * This routine determines the number of AFU ports by converting the global
1884  * port selection mask. The converted value is only valid following an AFU
1885  * reset (explicit or power-on). This routine must be invoked shortly after
1886  * mapping as other routines are dependent on the number of ports during the
1887  * initialization sequence.
1888  *
1889  * To support legacy AFUs that might not have reflected an initial global
1890  * port mask (value read is 0), default to the number of ports originally
1891  * supported by the cxlflash driver (2) before hardware with other port
1892  * offerings was introduced.
1893  */
1894 static void get_num_afu_ports(struct cxlflash_cfg *cfg)
1895 {
1896 	struct afu *afu = cfg->afu;
1897 	struct device *dev = &cfg->dev->dev;
1898 	u64 port_mask;
1899 	int num_fc_ports = LEGACY_FC_PORTS;
1900 
1901 	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1902 	if (port_mask != 0ULL)
1903 		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
1904 
1905 	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
1906 		__func__, port_mask, num_fc_ports);
1907 
1908 	cfg->num_fc_ports = num_fc_ports;
1909 	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
1910 }
1911 
1912 /**
1913  * init_afu() - setup as master context and start AFU
1914  * @cfg:	Internal structure associated with the host.
1915  *
1916  * This routine is a higher level of control for configuring the
1917  * AFU on probe and reset paths.
1918  *
1919  * Return: 0 on success, -errno on failure
1920  */
1921 static int init_afu(struct cxlflash_cfg *cfg)
1922 {
1923 	u64 reg;
1924 	int rc = 0;
1925 	struct afu *afu = cfg->afu;
1926 	struct device *dev = &cfg->dev->dev;
1927 	struct hwq *hwq;
1928 	int i;
1929 
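	/*
	 * Advise the cxl layer that a PERST reloads the same AFU image, so
	 * recovery paths may assume an unchanged AFU after a reset.
	 */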
1930 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1931 
1932 	afu->num_hwqs = afu->desired_hwqs;
1933 	for (i = 0; i < afu->num_hwqs; i++) {
1934 		rc = init_mc(cfg, i);
1935 		if (rc) {
1936 			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
1937 				__func__, rc, i);
1938 			goto err1;
1939 		}
1940 	}
1941 
1942 	/* Map the entire MMIO space of the AFU using the first context */
1943 	hwq = get_hwq(afu, PRIMARY_HWQ);
1944 	afu->afu_map = cxl_psa_map(hwq->ctx);
1945 	if (!afu->afu_map) {
1946 		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
1947 		rc = -ENOMEM;
1948 		goto err1;
1949 	}
1950 
	/* Don't byte reverse on reading afu_version, else the string will be backwards */
1952 	reg = readq(&afu->afu_map->global.regs.afu_version);
1953 	memcpy(afu->version, &reg, sizeof(reg));
1954 	afu->interface_version =
1955 	    readq_be(&afu->afu_map->global.regs.interface_version);
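	/*
	 * An AFU without this register likely reads back all 1s here;
	 * adding 1 wraps such a value to 0, flagging a back-level AFU.
	 */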
1956 	if ((afu->interface_version + 1) == 0) {
1957 		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
1958 			"interface version %016llx\n", afu->version,
1959 		       afu->interface_version);
1960 		rc = -EINVAL;
1961 		goto err1;
1962 	}
1963 
1964 	if (afu_is_sq_cmd_mode(afu)) {
1965 		afu->send_cmd = send_cmd_sq;
1966 		afu->context_reset = context_reset_sq;
1967 	} else {
1968 		afu->send_cmd = send_cmd_ioarrin;
1969 		afu->context_reset = context_reset_ioarrin;
1970 	}
1971 
1972 	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
1973 		afu->version, afu->interface_version);
1974 
1975 	get_num_afu_ports(cfg);
1976 
1977 	rc = start_afu(cfg);
1978 	if (rc) {
1979 		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
1980 		goto err1;
1981 	}
1982 
1983 	afu_err_intr_init(cfg->afu);
1984 	for (i = 0; i < afu->num_hwqs; i++) {
1985 		hwq = get_hwq(afu, i);
1986 
1987 		spin_lock_init(&hwq->rrin_slock);
1988 		hwq->room = readq_be(&hwq->host_map->cmd_room);
1989 	}
1990 
1991 	/* Restore the LUN mappings */
1992 	cxlflash_restore_luntable(cfg);
1993 out:
1994 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1995 	return rc;
1996 
1997 err1:
1998 	for (i = afu->num_hwqs - 1; i >= 0; i--) {
1999 		term_intr(cfg, UNMAP_THREE, i);
2000 		term_mc(cfg, i);
2001 	}
2002 	goto out;
2003 }
2004 
2005 /**
2006  * cxlflash_afu_sync() - builds and sends an AFU sync command
2007  * @afu:	AFU associated with the host.
2008  * @ctx_hndl_u:	Identifies context requesting sync.
2009  * @res_hndl_u:	Identifies resource requesting sync.
2010  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
2011  *
 * The AFU can only take one sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires that calling threads not be in
 * interrupt context, as they may sleep while waiting on a concurrent sync.
2016  *
2017  * AFU sync operations are only necessary and allowed when the device is
2018  * operating normally. When not operating normally, sync requests can occur as
2019  * part of cleaning up resources associated with an adapter prior to removal.
2020  * In this scenario, these requests are simply ignored (safe due to the AFU
2021  * going away).
2022  *
2023  * Return:
2024  *	0 on success
2025  *	-1 on failure
2026  */
2027 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
2028 		      res_hndl_t res_hndl_u, u8 mode)
2029 {
2030 	struct cxlflash_cfg *cfg = afu->parent;
2031 	struct device *dev = &cfg->dev->dev;
2032 	struct afu_cmd *cmd = NULL;
2033 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2034 	char *buf = NULL;
2035 	int rc = 0;
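	/*
	 * Note: as a function-local static, this mutex serializes syncs
	 * across every adapter in the system, not just this AFU.
	 */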
2036 	static DEFINE_MUTEX(sync_active);
2037 
2038 	if (cfg->state != STATE_NORMAL) {
2039 		dev_dbg(dev, "%s: Sync not required state=%u\n",
2040 			__func__, cfg->state);
2041 		return 0;
2042 	}
2043 
2044 	mutex_lock(&sync_active);
2045 	atomic_inc(&afu->cmds_active);
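	/*
	 * Over-allocate by (alignment - 1) bytes so that PTR_ALIGN() below
	 * can align the command without stepping past the allocation.
	 */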
2046 	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2047 	if (unlikely(!buf)) {
2048 		dev_err(dev, "%s: no memory for command\n", __func__);
2049 		rc = -1;
2050 		goto out;
2051 	}
2052 
2053 	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2054 	init_completion(&cmd->cevent);
2055 	cmd->parent = afu;
2056 	cmd->hwq_index = hwq->index;
2057 
2058 	dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
2059 
2060 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2061 	cmd->rcb.ctx_id = hwq->ctx_hndl;
2062 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
2063 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2064 
2065 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
2066 	cmd->rcb.cdb[1] = mode;
2067 
2068 	/* The cdb is aligned, no unaligned accessors required */
2069 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
2070 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
2071 
2072 	rc = afu->send_cmd(afu, cmd);
2073 	if (unlikely(rc))
2074 		goto out;
2075 
2076 	rc = wait_resp(afu, cmd);
2077 	if (unlikely(rc))
2078 		rc = -1;
2079 out:
2080 	atomic_dec(&afu->cmds_active);
2081 	mutex_unlock(&sync_active);
2082 	kfree(buf);
2083 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2084 	return rc;
2085 }
2086 
2087 /**
2088  * afu_reset() - resets the AFU
2089  * @cfg:	Internal structure associated with the host.
2090  *
2091  * Return: 0 on success, -errno on failure
2092  */
2093 static int afu_reset(struct cxlflash_cfg *cfg)
2094 {
2095 	struct device *dev = &cfg->dev->dev;
2096 	int rc = 0;
2097 
	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
2101 	term_afu(cfg);
2102 
2103 	rc = init_afu(cfg);
2104 
2105 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2106 	return rc;
2107 }
2108 
2109 /**
2110  * drain_ioctls() - wait until all currently executing ioctls have completed
2111  * @cfg:	Internal structure associated with the host.
2112  *
2113  * Obtain write access to read/write semaphore that wraps ioctl
2114  * handling to 'drain' ioctls currently executing.
2115  */
2116 static void drain_ioctls(struct cxlflash_cfg *cfg)
2117 {
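	/*
	 * Ioctl handlers hold this semaphore for read; taking it for write
	 * therefore blocks until every in-flight ioctl has released it.
	 */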
2118 	down_write(&cfg->ioctl_rwsem);
2119 	up_write(&cfg->ioctl_rwsem);
2120 }
2121 
2122 /**
2123  * cxlflash_eh_device_reset_handler() - reset a single LUN
2124  * @scp:	SCSI command to send.
2125  *
2126  * Return:
2127  *	SUCCESS as defined in scsi/scsi.h
2128  *	FAILED as defined in scsi/scsi.h
2129  */
2130 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2131 {
2132 	int rc = SUCCESS;
2133 	struct Scsi_Host *host = scp->device->host;
2134 	struct cxlflash_cfg *cfg = shost_priv(host);
2135 	struct device *dev = &cfg->dev->dev;
2136 	struct afu *afu = cfg->afu;
2137 	int rcr = 0;
2138 
2139 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2140 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2141 		scp->device->channel, scp->device->id, scp->device->lun,
2142 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2143 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2144 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2145 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2146 
2147 retry:
2148 	switch (cfg->state) {
2149 	case STATE_NORMAL:
2150 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
2151 		if (unlikely(rcr))
2152 			rc = FAILED;
2153 		break;
2154 	case STATE_RESET:
2155 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2156 		goto retry;
2157 	default:
2158 		rc = FAILED;
2159 		break;
2160 	}
2161 
2162 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2163 	return rc;
2164 }
2165 
2166 /**
2167  * cxlflash_eh_host_reset_handler() - reset the host adapter
2168  * @scp:	SCSI command from stack identifying host.
2169  *
2170  * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either wait
 * until the EEH recovery is complete or return success or failure based
2173  * upon the current device state.
2174  *
2175  * Return:
2176  *	SUCCESS as defined in scsi/scsi.h
2177  *	FAILED as defined in scsi/scsi.h
2178  */
2179 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2180 {
2181 	int rc = SUCCESS;
2182 	int rcr = 0;
2183 	struct Scsi_Host *host = scp->device->host;
2184 	struct cxlflash_cfg *cfg = shost_priv(host);
2185 	struct device *dev = &cfg->dev->dev;
2186 
2187 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2188 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2189 		scp->device->channel, scp->device->id, scp->device->lun,
2190 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2191 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2192 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2193 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2194 
2195 	switch (cfg->state) {
2196 	case STATE_NORMAL:
2197 		cfg->state = STATE_RESET;
2198 		drain_ioctls(cfg);
2199 		cxlflash_mark_contexts_error(cfg);
2200 		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else {
			cfg->state = STATE_NORMAL;
		}
2206 		wake_up_all(&cfg->reset_waitq);
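		/*
		 * Pause briefly to give an EEH event raised by the reset a
		 * chance to surface before the state is re-evaluated below.
		 */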
2207 		ssleep(1);
2208 		/* fall through */
2209 	case STATE_RESET:
2210 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2211 		if (cfg->state == STATE_NORMAL)
2212 			break;
2213 		/* fall through */
2214 	default:
2215 		rc = FAILED;
2216 		break;
2217 	}
2218 
2219 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2220 	return rc;
2221 }
2222 
2223 /**
2224  * cxlflash_change_queue_depth() - change the queue depth for the device
2225  * @sdev:	SCSI device destined for queue depth change.
2226  * @qdepth:	Requested queue depth value to set.
2227  *
2228  * The requested queue depth is capped to the maximum supported value.
2229  *
2230  * Return: The actual queue depth set.
2231  */
2232 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2233 {
2235 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2236 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2237 
2238 	scsi_change_queue_depth(sdev, qdepth);
2239 	return sdev->queue_depth;
2240 }
2241 
2242 /**
2243  * cxlflash_show_port_status() - queries and presents the current port status
2244  * @port:	Desired port for status reporting.
2245  * @cfg:	Internal structure associated with the host.
2246  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2247  *
2248  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2249  */
2250 static ssize_t cxlflash_show_port_status(u32 port,
2251 					 struct cxlflash_cfg *cfg,
2252 					 char *buf)
2253 {
2254 	struct device *dev = &cfg->dev->dev;
2255 	char *disp_status;
2256 	u64 status;
2257 	__be64 __iomem *fc_port_regs;
2258 
2259 	WARN_ON(port >= MAX_FC_PORTS);
2260 
2261 	if (port >= cfg->num_fc_ports) {
2262 		dev_info(dev, "%s: Port %d not supported on this card.\n",
2263 			__func__, port);
2264 		return -EINVAL;
2265 	}
2266 
2267 	fc_port_regs = get_fc_port_regs(cfg, port);
2268 	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2269 	status &= FC_MTIP_STATUS_MASK;
2270 
2271 	if (status == FC_MTIP_STATUS_ONLINE)
2272 		disp_status = "online";
2273 	else if (status == FC_MTIP_STATUS_OFFLINE)
2274 		disp_status = "offline";
2275 	else
2276 		disp_status = "unknown";
2277 
2278 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2279 }
2280 
2281 /**
2282  * port0_show() - queries and presents the current status of port 0
2283  * @dev:	Generic device associated with the host owning the port.
2284  * @attr:	Device attribute representing the port.
2285  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2286  *
2287  * Return: The size of the ASCII string returned in @buf.
2288  */
2289 static ssize_t port0_show(struct device *dev,
2290 			  struct device_attribute *attr,
2291 			  char *buf)
2292 {
2293 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2294 
2295 	return cxlflash_show_port_status(0, cfg, buf);
2296 }
2297 
2298 /**
2299  * port1_show() - queries and presents the current status of port 1
2300  * @dev:	Generic device associated with the host owning the port.
2301  * @attr:	Device attribute representing the port.
2302  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2303  *
2304  * Return: The size of the ASCII string returned in @buf.
2305  */
2306 static ssize_t port1_show(struct device *dev,
2307 			  struct device_attribute *attr,
2308 			  char *buf)
2309 {
2310 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2311 
2312 	return cxlflash_show_port_status(1, cfg, buf);
2313 }
2314 
2315 /**
2316  * port2_show() - queries and presents the current status of port 2
2317  * @dev:	Generic device associated with the host owning the port.
2318  * @attr:	Device attribute representing the port.
2319  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2320  *
2321  * Return: The size of the ASCII string returned in @buf.
2322  */
2323 static ssize_t port2_show(struct device *dev,
2324 			  struct device_attribute *attr,
2325 			  char *buf)
2326 {
2327 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2328 
2329 	return cxlflash_show_port_status(2, cfg, buf);
2330 }
2331 
2332 /**
2333  * port3_show() - queries and presents the current status of port 3
2334  * @dev:	Generic device associated with the host owning the port.
2335  * @attr:	Device attribute representing the port.
2336  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2337  *
2338  * Return: The size of the ASCII string returned in @buf.
2339  */
2340 static ssize_t port3_show(struct device *dev,
2341 			  struct device_attribute *attr,
2342 			  char *buf)
2343 {
2344 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2345 
2346 	return cxlflash_show_port_status(3, cfg, buf);
2347 }
2348 
2349 /**
2350  * lun_mode_show() - presents the current LUN mode of the host
2351  * @dev:	Generic device associated with the host.
2352  * @attr:	Device attribute representing the LUN mode.
2353  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2354  *
2355  * Return: The size of the ASCII string returned in @buf.
2356  */
2357 static ssize_t lun_mode_show(struct device *dev,
2358 			     struct device_attribute *attr, char *buf)
2359 {
2360 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2361 	struct afu *afu = cfg->afu;
2362 
2363 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2364 }
2365 
2366 /**
2367  * lun_mode_store() - sets the LUN mode of the host
2368  * @dev:	Generic device associated with the host.
2369  * @attr:	Device attribute representing the LUN mode.
2370  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
2372  *
2373  * The CXL Flash AFU supports a dummy LUN mode where the external
2374  * links and storage are not required. Space on the FPGA is used
2375  * to create 1 or 2 small LUNs which are presented to the system
2376  * as if they were a normal storage device. This feature is useful
2377  * during development and also provides manufacturing with a way
2378  * to test the AFU without an actual device.
2379  *
2380  * 0 = external LUN[s] (default)
2381  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2382  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2383  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2384  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2385  *
 * Return: @count (invalid modes are silently ignored).
2387  */
2388 static ssize_t lun_mode_store(struct device *dev,
2389 			      struct device_attribute *attr,
2390 			      const char *buf, size_t count)
2391 {
2392 	struct Scsi_Host *shost = class_to_shost(dev);
2393 	struct cxlflash_cfg *cfg = shost_priv(shost);
2394 	struct afu *afu = cfg->afu;
2395 	int rc;
2396 	u32 lun_mode;
2397 
2398 	rc = kstrtouint(buf, 10, &lun_mode);
2399 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2400 		afu->internal_lun = lun_mode;
2401 
2402 		/*
2403 		 * When configured for internal LUN, there is only one channel,
2404 		 * channel number 0, else there will be one less than the number
2405 		 * of fc ports for this card.
2406 		 */
2407 		if (afu->internal_lun)
2408 			shost->max_channel = 0;
2409 		else
2410 			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2411 
2412 		afu_reset(cfg);
2413 		scsi_scan_host(cfg->host);
2414 	}
2415 
2416 	return count;
2417 }
2418 
2419 /**
2420  * ioctl_version_show() - presents the current ioctl version of the host
2421  * @dev:	Generic device associated with the host.
2422  * @attr:	Device attribute representing the ioctl version.
2423  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2424  *
2425  * Return: The size of the ASCII string returned in @buf.
2426  */
2427 static ssize_t ioctl_version_show(struct device *dev,
2428 				  struct device_attribute *attr, char *buf)
2429 {
2430 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2431 }
2432 
2433 /**
2434  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2435  * @port:	Desired port for status reporting.
2436  * @cfg:	Internal structure associated with the host.
2437  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2438  *
2439  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2440  */
2441 static ssize_t cxlflash_show_port_lun_table(u32 port,
2442 					    struct cxlflash_cfg *cfg,
2443 					    char *buf)
2444 {
2445 	struct device *dev = &cfg->dev->dev;
2446 	__be64 __iomem *fc_port_luns;
2447 	int i;
2448 	ssize_t bytes = 0;
2449 
2450 	WARN_ON(port >= MAX_FC_PORTS);
2451 
2452 	if (port >= cfg->num_fc_ports) {
2453 		dev_info(dev, "%s: Port %d not supported on this card.\n",
2454 			__func__, port);
2455 		return -EINVAL;
2456 	}
2457 
2458 	fc_port_luns = get_fc_port_luns(cfg, port);
2459 
2460 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2461 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2462 				   "%03d: %016llx\n",
2463 				   i, readq_be(&fc_port_luns[i]));
2464 	return bytes;
2465 }
2466 
2467 /**
2468  * port0_lun_table_show() - presents the current LUN table of port 0
2469  * @dev:	Generic device associated with the host owning the port.
2470  * @attr:	Device attribute representing the port.
2471  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2472  *
2473  * Return: The size of the ASCII string returned in @buf.
2474  */
2475 static ssize_t port0_lun_table_show(struct device *dev,
2476 				    struct device_attribute *attr,
2477 				    char *buf)
2478 {
2479 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2480 
2481 	return cxlflash_show_port_lun_table(0, cfg, buf);
2482 }
2483 
2484 /**
2485  * port1_lun_table_show() - presents the current LUN table of port 1
2486  * @dev:	Generic device associated with the host owning the port.
2487  * @attr:	Device attribute representing the port.
2488  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2489  *
2490  * Return: The size of the ASCII string returned in @buf.
2491  */
2492 static ssize_t port1_lun_table_show(struct device *dev,
2493 				    struct device_attribute *attr,
2494 				    char *buf)
2495 {
2496 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2497 
2498 	return cxlflash_show_port_lun_table(1, cfg, buf);
2499 }
2500 
2501 /**
2502  * port2_lun_table_show() - presents the current LUN table of port 2
2503  * @dev:	Generic device associated with the host owning the port.
2504  * @attr:	Device attribute representing the port.
2505  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2506  *
2507  * Return: The size of the ASCII string returned in @buf.
2508  */
2509 static ssize_t port2_lun_table_show(struct device *dev,
2510 				    struct device_attribute *attr,
2511 				    char *buf)
2512 {
2513 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2514 
2515 	return cxlflash_show_port_lun_table(2, cfg, buf);
2516 }
2517 
2518 /**
2519  * port3_lun_table_show() - presents the current LUN table of port 3
2520  * @dev:	Generic device associated with the host owning the port.
2521  * @attr:	Device attribute representing the port.
2522  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2523  *
2524  * Return: The size of the ASCII string returned in @buf.
2525  */
2526 static ssize_t port3_lun_table_show(struct device *dev,
2527 				    struct device_attribute *attr,
2528 				    char *buf)
2529 {
2530 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2531 
2532 	return cxlflash_show_port_lun_table(3, cfg, buf);
2533 }
2534 
2535 /**
2536  * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2537  * @dev:	Generic device associated with the host.
2538  * @attr:	Device attribute representing the IRQ poll weight.
2539  * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
2540  *		weight in ASCII.
2541  *
2542  * An IRQ poll weight of 0 indicates polling is disabled.
2543  *
2544  * Return: The size of the ASCII string returned in @buf.
2545  */
2546 static ssize_t irqpoll_weight_show(struct device *dev,
2547 				   struct device_attribute *attr, char *buf)
2548 {
2549 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2550 	struct afu *afu = cfg->afu;
2551 
2552 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2553 }
2554 
2555 /**
2556  * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2557  * @dev:	Generic device associated with the host.
2558  * @attr:	Device attribute representing the IRQ poll weight.
2559  * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
2560  *		weight in ASCII.
 * @count:	Length of data residing in @buf.
2562  *
2563  * An IRQ poll weight of 0 indicates polling is disabled.
2564  *
 * Return: @count on success, -errno on failure.
2566  */
2567 static ssize_t irqpoll_weight_store(struct device *dev,
2568 				    struct device_attribute *attr,
2569 				    const char *buf, size_t count)
2570 {
2571 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2572 	struct device *cfgdev = &cfg->dev->dev;
2573 	struct afu *afu = cfg->afu;
2574 	struct hwq *hwq;
2575 	u32 weight;
2576 	int rc, i;
2577 
2578 	rc = kstrtouint(buf, 10, &weight);
2579 	if (rc)
2580 		return -EINVAL;
2581 
2582 	if (weight > 256) {
2583 		dev_info(cfgdev,
2584 			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2585 		return -EINVAL;
2586 	}
2587 
2588 	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Specified IRQ poll weight matches the current weight.\n");
2591 		return -EINVAL;
2592 	}
2593 
2594 	if (afu_is_irqpoll_enabled(afu)) {
2595 		for (i = 0; i < afu->num_hwqs; i++) {
2596 			hwq = get_hwq(afu, i);
2597 
2598 			irq_poll_disable(&hwq->irqpoll);
2599 		}
2600 	}
2601 
2602 	afu->irqpoll_weight = weight;
2603 
2604 	if (weight > 0) {
2605 		for (i = 0; i < afu->num_hwqs; i++) {
2606 			hwq = get_hwq(afu, i);
2607 
2608 			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2609 		}
2610 	}
2611 
2612 	return count;
2613 }
2614 
2615 /**
2616  * num_hwqs_show() - presents the number of hardware queues for the host
2617  * @dev:	Generic device associated with the host.
2618  * @attr:	Device attribute representing the number of hardware queues.
2619  * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
2620  *		queues in ASCII.
2621  *
2622  * Return: The size of the ASCII string returned in @buf.
2623  */
2624 static ssize_t num_hwqs_show(struct device *dev,
2625 			     struct device_attribute *attr, char *buf)
2626 {
2627 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2628 	struct afu *afu = cfg->afu;
2629 
2630 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2631 }
2632 
2633 /**
2634  * num_hwqs_store() - sets the number of hardware queues for the host
2635  * @dev:	Generic device associated with the host.
2636  * @attr:	Device attribute representing the number of hardware queues.
2637  * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
2638  *		queues in ASCII.
 * @count:	Length of data residing in @buf.
2640  *
2641  * n > 0: num_hwqs = n
2642  * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2644  *
 * Return: @count on success, -errno on failure.
2646  */
2647 static ssize_t num_hwqs_store(struct device *dev,
2648 			      struct device_attribute *attr,
2649 			      const char *buf, size_t count)
2650 {
2651 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2652 	struct afu *afu = cfg->afu;
2653 	int rc;
2654 	int nhwqs, num_hwqs;
2655 
2656 	rc = kstrtoint(buf, 10, &nhwqs);
2657 	if (rc)
2658 		return -EINVAL;
2659 
2660 	if (nhwqs >= 1)
2661 		num_hwqs = nhwqs;
2662 	else if (nhwqs == 0)
2663 		num_hwqs = num_online_cpus();
2664 	else
2665 		num_hwqs = num_online_cpus() / abs(nhwqs);
2666 
2667 	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
2668 	WARN_ON_ONCE(afu->desired_hwqs == 0);
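	/* The new queue count only takes effect across the AFU reset below */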
2669 
2670 retry:
2671 	switch (cfg->state) {
2672 	case STATE_NORMAL:
2673 		cfg->state = STATE_RESET;
2674 		drain_ioctls(cfg);
2675 		cxlflash_mark_contexts_error(cfg);
2676 		rc = afu_reset(cfg);
2677 		if (rc)
2678 			cfg->state = STATE_FAILTERM;
2679 		else
2680 			cfg->state = STATE_NORMAL;
2681 		wake_up_all(&cfg->reset_waitq);
2682 		break;
2683 	case STATE_RESET:
2684 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2685 		if (cfg->state == STATE_NORMAL)
			goto retry;
		/* fall through */
2687 	default:
2688 		/* Ideally should not happen */
2689 		dev_err(dev, "%s: Device is not ready, state=%d\n",
2690 			__func__, cfg->state);
2691 		break;
2692 	}
2693 
2694 	return count;
2695 }
2696 
2697 static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
2698 
2699 /**
2700  * hwq_mode_show() - presents the HWQ steering mode for the host
2701  * @dev:	Generic device associated with the host.
2702  * @attr:	Device attribute representing the HWQ steering mode.
2703  * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
2704  *		as a character string.
2705  *
2706  * Return: The size of the ASCII string returned in @buf.
2707  */
2708 static ssize_t hwq_mode_show(struct device *dev,
2709 			     struct device_attribute *attr, char *buf)
2710 {
2711 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2712 	struct afu *afu = cfg->afu;
2713 
2714 	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
2715 }
2716 
2717 /**
2718  * hwq_mode_store() - sets the HWQ steering mode for the host
2719  * @dev:	Generic device associated with the host.
2720  * @attr:	Device attribute representing the HWQ steering mode.
2721  * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
2722  *		as a character string.
 * @count:	Length of data residing in @buf.
2724  *
2725  * rr = Round-Robin
2726  * tag = Block MQ Tagging
2727  * cpu = CPU Affinity
2728  *
 * Return: @count on success, -errno on failure.
2730  */
2731 static ssize_t hwq_mode_store(struct device *dev,
2732 			      struct device_attribute *attr,
2733 			      const char *buf, size_t count)
2734 {
2735 	struct Scsi_Host *shost = class_to_shost(dev);
2736 	struct cxlflash_cfg *cfg = shost_priv(shost);
2737 	struct device *cfgdev = &cfg->dev->dev;
2738 	struct afu *afu = cfg->afu;
2739 	int i;
2740 	u32 mode = MAX_HWQ_MODE;
2741 
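	/* mode remains MAX_HWQ_MODE (an invalid sentinel) unless a name matches */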
2742 	for (i = 0; i < MAX_HWQ_MODE; i++) {
2743 		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
2744 			mode = i;
2745 			break;
2746 		}
2747 	}
2748 
2749 	if (mode >= MAX_HWQ_MODE) {
2750 		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
2751 		return -EINVAL;
2752 	}
2753 
2754 	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
2755 		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
2756 			 "HWQ steering mode.\n");
2757 		return -EINVAL;
2758 	}
2759 
2760 	afu->hwq_mode = mode;
2761 
2762 	return count;
2763 }
2764 
2765 /**
2766  * mode_show() - presents the current mode of the device
2767  * @dev:	Generic device associated with the device.
2768  * @attr:	Device attribute representing the device mode.
2769  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2770  *
2771  * Return: The size of the ASCII string returned in @buf.
2772  */
2773 static ssize_t mode_show(struct device *dev,
2774 			 struct device_attribute *attr, char *buf)
2775 {
2776 	struct scsi_device *sdev = to_scsi_device(dev);
2777 
2778 	return scnprintf(buf, PAGE_SIZE, "%s\n",
2779 			 sdev->hostdata ? "superpipe" : "legacy");
2780 }
2781 
2782 /*
2783  * Host attributes
2784  */
2785 static DEVICE_ATTR_RO(port0);
2786 static DEVICE_ATTR_RO(port1);
2787 static DEVICE_ATTR_RO(port2);
2788 static DEVICE_ATTR_RO(port3);
2789 static DEVICE_ATTR_RW(lun_mode);
2790 static DEVICE_ATTR_RO(ioctl_version);
2791 static DEVICE_ATTR_RO(port0_lun_table);
2792 static DEVICE_ATTR_RO(port1_lun_table);
2793 static DEVICE_ATTR_RO(port2_lun_table);
2794 static DEVICE_ATTR_RO(port3_lun_table);
2795 static DEVICE_ATTR_RW(irqpoll_weight);
2796 static DEVICE_ATTR_RW(num_hwqs);
2797 static DEVICE_ATTR_RW(hwq_mode);
2798 
2799 static struct device_attribute *cxlflash_host_attrs[] = {
2800 	&dev_attr_port0,
2801 	&dev_attr_port1,
2802 	&dev_attr_port2,
2803 	&dev_attr_port3,
2804 	&dev_attr_lun_mode,
2805 	&dev_attr_ioctl_version,
2806 	&dev_attr_port0_lun_table,
2807 	&dev_attr_port1_lun_table,
2808 	&dev_attr_port2_lun_table,
2809 	&dev_attr_port3_lun_table,
2810 	&dev_attr_irqpoll_weight,
2811 	&dev_attr_num_hwqs,
2812 	&dev_attr_hwq_mode,
2813 	NULL
2814 };
2815 
2816 /*
2817  * Device attributes
2818  */
2819 static DEVICE_ATTR_RO(mode);
2820 
2821 static struct device_attribute *cxlflash_dev_attrs[] = {
2822 	&dev_attr_mode,
2823 	NULL
2824 };
2825 
2826 /*
2827  * Host template
2828  */
2829 static struct scsi_host_template driver_template = {
2830 	.module = THIS_MODULE,
2831 	.name = CXLFLASH_ADAPTER_NAME,
2832 	.info = cxlflash_driver_info,
2833 	.ioctl = cxlflash_ioctl,
2834 	.proc_name = CXLFLASH_NAME,
2835 	.queuecommand = cxlflash_queuecommand,
2836 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2837 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2838 	.change_queue_depth = cxlflash_change_queue_depth,
2839 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
2840 	.can_queue = CXLFLASH_MAX_CMDS,
2841 	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
2842 	.this_id = -1,
2843 	.sg_tablesize = 1,	/* No scatter gather support */
2844 	.max_sectors = CXLFLASH_MAX_SECTORS,
2845 	.use_clustering = ENABLE_CLUSTERING,
2846 	.shost_attrs = cxlflash_host_attrs,
2847 	.sdev_attrs = cxlflash_dev_attrs,
2848 };
2849 
2850 /*
2851  * Device dependent values
2852  */
2853 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
2854 					0ULL };
2855 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
2856 					CXLFLASH_NOTIFY_SHUTDOWN };
2857 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
2858 					CXLFLASH_NOTIFY_SHUTDOWN };
2859 
2860 /*
2861  * PCI device binding table
2862  */
2863 static struct pci_device_id cxlflash_pci_table[] = {
2864 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2865 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2866 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2867 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
2868 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
2869 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
2870 	{}
2871 };
2872 
2873 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2874 
2875 /**
2876  * cxlflash_worker_thread() - work thread handler for the AFU
2877  * @work:	Work structure contained within cxlflash associated with host.
2878  *
2879  * Handles the following events:
 * - Link reset, which cannot be performed in interrupt context because
 *   it can block for up to a few seconds
2882  * - Rescan the host
2883  */
2884 static void cxlflash_worker_thread(struct work_struct *work)
2885 {
2886 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2887 						work_q);
2888 	struct afu *afu = cfg->afu;
2889 	struct device *dev = &cfg->dev->dev;
2890 	__be64 __iomem *fc_port_regs;
2891 	int port;
2892 	ulong lock_flags;
2893 
2894 	/* Avoid MMIO if the device has failed */
2896 	if (cfg->state != STATE_NORMAL)
2897 		return;
2898 
2899 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2900 
2901 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2902 		port = cfg->lr_port;
		if (port < 0) {
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		} else {
2907 			spin_unlock_irqrestore(cfg->host->host_lock,
2908 					       lock_flags);
2909 
2910 			/* The reset can block... */
2911 			fc_port_regs = get_fc_port_regs(cfg, port);
2912 			afu_link_reset(afu, port, fc_port_regs);
2913 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2914 		}
2915 
2916 		cfg->lr_state = LINK_RESET_COMPLETE;
2917 	}
2918 
2919 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2920 
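	/* Consume one pending scan request, if any, without going negative */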
2921 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2922 		scsi_scan_host(cfg->host);
2923 }
2924 
2925 /**
2926  * cxlflash_probe() - PCI entry point to add host
2927  * @pdev:	PCI device associated with the host.
2928  * @dev_id:	PCI device id associated with device.
2929  *
2930  * The device will initially start out in a 'probing' state and
2931  * transition to the 'normal' state at the end of a successful
2932  * probe. Should an EEH event occur during probe, the notification
2933  * thread (error_detected()) will wait until the probe handler
2934  * is nearly complete. At that time, the device will be moved to
2935  * a 'probed' state and the EEH thread woken up to drive the slot
2936  * reset and recovery (device moves to 'normal' state). Meanwhile,
2937  * the probe will be allowed to exit successfully.
2938  *
2939  * Return: 0 on success, -errno on failure
2940  */
2941 static int cxlflash_probe(struct pci_dev *pdev,
2942 			  const struct pci_device_id *dev_id)
2943 {
2944 	struct Scsi_Host *host;
2945 	struct cxlflash_cfg *cfg = NULL;
2946 	struct device *dev = &pdev->dev;
2947 	struct dev_dependent_vals *ddv;
2948 	int rc = 0;
2949 	int k;
2950 
2951 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2952 		__func__, pdev->irq);
2953 
2954 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2955 	driver_template.max_sectors = ddv->max_sectors;
2956 
2957 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2958 	if (!host) {
2959 		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
2960 		rc = -ENOMEM;
2961 		goto out;
2962 	}
2963 
2964 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2965 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2966 	host->unique_id = host->host_no;
2967 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2968 
2969 	cfg = shost_priv(host);
2970 	cfg->host = host;
2971 	rc = alloc_mem(cfg);
2972 	if (rc) {
2973 		dev_err(dev, "%s: alloc_mem failed\n", __func__);
2974 		rc = -ENOMEM;
2975 		scsi_host_put(cfg->host);
2976 		goto out;
2977 	}
2978 
2979 	cfg->init_state = INIT_STATE_NONE;
2980 	cfg->dev = pdev;
2981 	cfg->cxl_fops = cxlflash_cxl_fops;
2982 
2983 	/*
2984 	 * Promoted LUNs move to the top of the LUN table. The rest stay on
2985 	 * the bottom half. The bottom half grows from the end (index = 255),
2986 	 * whereas the top half grows from the beginning (index = 0).
2987 	 *
2988 	 * Initialize the last LUN index for all possible ports.
2989 	 */
2990 	cfg->promote_lun_index = 0;
2991 
2992 	for (k = 0; k < MAX_FC_PORTS; k++)
2993 		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
2994 
2995 	cfg->dev_id = (struct pci_device_id *)dev_id;
2996 
2997 	init_waitqueue_head(&cfg->tmf_waitq);
2998 	init_waitqueue_head(&cfg->reset_waitq);
2999 
3000 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3001 	cfg->lr_state = LINK_RESET_INVALID;
3002 	cfg->lr_port = -1;
3003 	spin_lock_init(&cfg->tmf_slock);
3004 	mutex_init(&cfg->ctx_tbl_list_mutex);
3005 	mutex_init(&cfg->ctx_recovery_mutex);
3006 	init_rwsem(&cfg->ioctl_rwsem);
3007 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3008 	INIT_LIST_HEAD(&cfg->lluns);
3009 
3010 	pci_set_drvdata(pdev, cfg);
3011 
3012 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
3013 
3014 	rc = init_pci(cfg);
3015 	if (rc) {
3016 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3017 		goto out_remove;
3018 	}
3019 	cfg->init_state = INIT_STATE_PCI;
3020 
3021 	rc = init_afu(cfg);
3022 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3023 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3024 		goto out_remove;
3025 	}
3026 	cfg->init_state = INIT_STATE_AFU;
3027 
3028 	rc = init_scsi(cfg);
3029 	if (rc) {
3030 		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3031 		goto out_remove;
3032 	}
3033 	cfg->init_state = INIT_STATE_SCSI;
3034 
3035 	if (wq_has_sleeper(&cfg->reset_waitq)) {
3036 		cfg->state = STATE_PROBED;
3037 		wake_up_all(&cfg->reset_waitq);
	} else {
		cfg->state = STATE_NORMAL;
	}
3040 out:
3041 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3042 	return rc;
3043 
3044 out_remove:
3045 	cxlflash_remove(pdev);
3046 	goto out;
3047 }
3048 
3049 /**
3050  * cxlflash_pci_error_detected() - called when a PCI error is detected
3051  * @pdev:	PCI device struct.
3052  * @state:	PCI channel state.
3053  *
3054  * When an EEH occurs during an active reset, wait until the reset is
3055  * complete and then take action based upon the device state.
3056  *
3057  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3058  */
3059 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3060 						    pci_channel_state_t state)
3061 {
3062 	int rc = 0;
3063 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3064 	struct device *dev = &cfg->dev->dev;
3065 
3066 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3067 
3068 	switch (state) {
3069 	case pci_channel_io_frozen:
3070 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3071 					     cfg->state != STATE_PROBING);
3072 		if (cfg->state == STATE_FAILTERM)
3073 			return PCI_ERS_RESULT_DISCONNECT;
3074 
3075 		cfg->state = STATE_RESET;
3076 		scsi_block_requests(cfg->host);
3077 		drain_ioctls(cfg);
3078 		rc = cxlflash_mark_contexts_error(cfg);
3079 		if (unlikely(rc))
3080 			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3081 				__func__, rc);
3082 		term_afu(cfg);
3083 		return PCI_ERS_RESULT_NEED_RESET;
3084 	case pci_channel_io_perm_failure:
3085 		cfg->state = STATE_FAILTERM;
3086 		wake_up_all(&cfg->reset_waitq);
3087 		scsi_unblock_requests(cfg->host);
3088 		return PCI_ERS_RESULT_DISCONNECT;
3089 	default:
3090 		break;
3091 	}
3092 	return PCI_ERS_RESULT_NEED_RESET;
3093 }
3094 
3095 /**
3096  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3097  * @pdev:	PCI device struct.
3098  *
3099  * This routine is called by the pci error recovery code after the PCI
3100  * slot has been reset, just before we should resume normal operations.
3101  *
3102  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3103  */
3104 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3105 {
3106 	int rc = 0;
3107 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3108 	struct device *dev = &cfg->dev->dev;
3109 
3110 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3111 
3112 	rc = init_afu(cfg);
3113 	if (unlikely(rc)) {
3114 		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3115 		return PCI_ERS_RESULT_DISCONNECT;
3116 	}
3117 
3118 	return PCI_ERS_RESULT_RECOVERED;
3119 }
3120 
3121 /**
3122  * cxlflash_pci_resume() - called when normal operation can resume
3123  * @pdev:	PCI device struct
3124  */
3125 static void cxlflash_pci_resume(struct pci_dev *pdev)
3126 {
3127 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3128 	struct device *dev = &cfg->dev->dev;
3129 
3130 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3131 
3132 	cfg->state = STATE_NORMAL;
3133 	wake_up_all(&cfg->reset_waitq);
3134 	scsi_unblock_requests(cfg->host);
3135 }
3136 
3137 static const struct pci_error_handlers cxlflash_err_handler = {
3138 	.error_detected = cxlflash_pci_error_detected,
3139 	.slot_reset = cxlflash_pci_slot_reset,
3140 	.resume = cxlflash_pci_resume,
3141 };
3142 
3143 /*
3144  * PCI device structure
3145  */
3146 static struct pci_driver cxlflash_driver = {
3147 	.name = CXLFLASH_NAME,
3148 	.id_table = cxlflash_pci_table,
3149 	.probe = cxlflash_probe,
3150 	.remove = cxlflash_remove,
3151 	.shutdown = cxlflash_remove,
3152 	.err_handler = &cxlflash_err_handler,
3153 };
3154 
3155 /**
3156  * init_cxlflash() - module entry point
3157  *
3158  * Return: 0 on success, -errno on failure
3159  */
3160 static int __init init_cxlflash(void)
3161 {
3162 	check_sizes();
3163 	cxlflash_list_init();
3164 
3165 	return pci_register_driver(&cxlflash_driver);
3166 }
3167 
3168 /**
3169  * exit_cxlflash() - module exit point
3170  */
3171 static void __exit exit_cxlflash(void)
3172 {
3173 	cxlflash_term_global_luns();
3174 	cxlflash_free_errpage();
3175 
3176 	pci_unregister_driver(&cxlflash_driver);
3177 }
3178 
3179 module_init(init_cxlflash);
3180 module_exit(exit_cxlflash);
3181