xref: /linux/drivers/scsi/lpfc/lpfc_bsg.c (revision 6ed7ffddcf61f668114edb676417e5fb33773b59)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *******************************************************************/
20 
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include <scsi/fc/fc_fs.h>
33 
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_bsg.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_debugfs.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_version.h"
48 
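/* Event registration object shared with the unsolicited-event path; its
 * lifetime is managed by the embedded kref (lpfc_bsg_event_ref/_unref)
 * and live instances are linked on phba->ct_ev_waiters.
 */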
49 struct lpfc_bsg_event {
50 	struct list_head node;
51 	struct kref kref;
52 	wait_queue_head_t wq;
53 
54 	/* Event type and waiter identifiers */
55 	uint32_t type_mask;
56 	uint32_t req_id;
57 	uint32_t reg_id;
58 
59 	/* next two fields support the auto-delete logic */
60 	unsigned long wait_time_stamp;
61 	int waiting;
62 
63 	/* seen and not seen events */
64 	struct list_head events_to_get;
65 	struct list_head events_to_see;
66 
67 	/* job waiting for this event to finish */
68 	struct fc_bsg_job *set_job;
69 };
70 
71 struct lpfc_bsg_iocb {
72 	struct lpfc_iocbq *cmdiocbq;
73 	struct lpfc_iocbq *rspiocbq;
74 	struct lpfc_dmabuf *bmp;
75 	struct lpfc_nodelist *ndlp;
76 
77 	/* job waiting for this iocb to finish */
78 	struct fc_bsg_job *set_job;
79 };
80 
81 struct lpfc_bsg_mbox {
82 	LPFC_MBOXQ_t *pmboxq;
83 	MAILBOX_t *mb;
84 	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85 	uint8_t *ext; /* extended mailbox data */
86 	uint32_t mbOffset; /* from app */
87 	uint32_t inExtWLen; /* from app */
88 	uint32_t outExtWLen; /* from app */
89 
90 	/* job waiting for this mbox command to finish */
91 	struct fc_bsg_job *set_job;
92 };
93 
94 #define MENLO_DID 0x0000FC0E
95 
96 struct lpfc_bsg_menlo {
97 	struct lpfc_iocbq *cmdiocbq;
98 	struct lpfc_iocbq *rspiocbq;
99 	struct lpfc_dmabuf *bmp;
100 
101 	/* job waiting for this iocb to finish */
102 	struct fc_bsg_job *set_job;
103 };
104 
105 #define TYPE_EVT	1
106 #define TYPE_IOCB	2
107 #define TYPE_MBOX	3
108 #define TYPE_MENLO	4
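/* Driver-private data attached to an in-flight bsg job; type selects
 * which context_un member is valid.  A pointer to this structure is the
 * handoff between the submit path, the completion handler, and the bsg
 * timeout handler.
 */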
109 struct bsg_job_data {
110 	uint32_t type;
111 	union {
112 		struct lpfc_bsg_event *evt;
113 		struct lpfc_bsg_iocb iocb;
114 		struct lpfc_bsg_mbox mbox;
115 		struct lpfc_bsg_menlo menlo;
116 	} context_un;
117 };
118 
119 struct event_data {
120 	struct list_head node;
121 	uint32_t type;
122 	uint32_t immed_dat;
123 	void *data;
124 	uint32_t len;
125 };
126 
127 #define BUF_SZ_4K 4096
128 #define SLI_CT_ELX_LOOPBACK 0x10
129 
130 enum ELX_LOOPBACK_CMD {
131 	ELX_LOOPBACK_XRI_SETUP,
132 	ELX_LOOPBACK_DATA,
133 };
134 
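/* Bytes preceding the command-specific union in a CT request, computed
 * with the offsetof-via-NULL idiom; equivalent to
 * offsetof(struct lpfc_sli_ct_request, un).
 */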
135 #define ELX_LOOPBACK_HEADER_SZ \
136 	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
137 
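/* An lpfc_dmabuf extended with its allocation size so loopback
 * diagnostic buffers can be freed correctly (see diag_cmd_data_free).
 */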
138 struct lpfc_dmabufext {
139 	struct lpfc_dmabuf dma;
140 	uint32_t size;
141 	uint32_t flag;
142 };
143 
144 /**
145  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
146  * @phba: Pointer to HBA context object.
147  * @cmdiocbq: Pointer to command iocb.
148  * @rspiocbq: Pointer to response iocb.
149  *
150  * This function is the completion handler for iocbs issued using the
151  * lpfc_bsg_send_mgmt_cmd function. This function is called by the
152  * ring event handler function without any lock held. This function
153  * can be called from both worker thread context and interrupt
154  * context. This function can also be called from another thread which
155  * cleans up the SLI layer objects.
156  * This function copies the contents of the response iocb to the
157  * response iocb memory object provided by the caller of
158  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
159  * sleeps for the iocb completion.
160  **/
161 static void
162 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
163 			struct lpfc_iocbq *cmdiocbq,
164 			struct lpfc_iocbq *rspiocbq)
165 {
166 	struct bsg_job_data *dd_data;
167 	struct fc_bsg_job *job;
168 	IOCB_t *rsp;
169 	struct lpfc_dmabuf *bmp;
170 	struct lpfc_nodelist *ndlp;
171 	struct lpfc_bsg_iocb *iocb;
172 	unsigned long flags;
173 	int rc = 0;
174 
175 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
176 	dd_data = cmdiocbq->context2;
177 	if (!dd_data) {
178 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
179 		lpfc_sli_release_iocbq(phba, cmdiocbq);
180 		return;
181 	}
182 
183 	iocb = &dd_data->context_un.iocb;
184 	job = iocb->set_job;
185 	job->dd_data = NULL; /* so timeout handler does not reply */
186 
187 	bmp = iocb->bmp;
188 	rsp = &rspiocbq->iocb;
189 	ndlp = cmdiocbq->context1;
190 
191 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
192 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
193 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
194 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
195 
196 	if (rsp->ulpStatus) {
197 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
198 			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
199 			case IOERR_SEQUENCE_TIMEOUT:
200 				rc = -ETIMEDOUT;
201 				break;
202 			case IOERR_INVALID_RPI:
203 				rc = -EFAULT;
204 				break;
205 			default:
206 				rc = -EACCES;
207 				break;
208 			}
209 		} else
210 			rc = -EACCES;
211 	} else
212 		job->reply->reply_payload_rcv_len =
213 			rsp->un.genreq64.bdl.bdeSize;
214 
215 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
216 	lpfc_sli_release_iocbq(phba, cmdiocbq);
217 	lpfc_nlp_put(ndlp);
218 	kfree(bmp);
219 	kfree(dd_data);
220 	/* make error code available to userspace */
221 	job->reply->result = rc;
222 	/* complete the job back to userspace */
223 	job->job_done(job);
224 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
225 	return;
226 }
227 
228 /**
229  * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
230  * @job: fc_bsg_job to handle
231  **/
232 static int
233 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
234 {
235 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
236 	struct lpfc_hba *phba = vport->phba;
237 	struct lpfc_rport_data *rdata = job->rport->dd_data;
238 	struct lpfc_nodelist *ndlp = rdata->pnode;
239 	struct ulp_bde64 *bpl = NULL;
240 	uint32_t timeout;
241 	struct lpfc_iocbq *cmdiocbq = NULL;
242 	IOCB_t *cmd;
243 	struct lpfc_dmabuf *bmp = NULL;
244 	int request_nseg;
245 	int reply_nseg;
246 	struct scatterlist *sgel = NULL;
247 	int numbde;
248 	dma_addr_t busaddr;
249 	struct bsg_job_data *dd_data;
250 	uint32_t creg_val;
251 	int rc = 0;
252 	int iocb_stat;
253 
254 	/* in case no data is transferred */
255 	job->reply->reply_payload_rcv_len = 0;
256 
257 	/* allocate our bsg tracking structure */
258 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
259 	if (!dd_data) {
260 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
261 				"2733 Failed allocation of dd_data\n");
262 		rc = -ENOMEM;
263 		goto no_dd_data;
264 	}
265 
266 	if (!lpfc_nlp_get(ndlp)) {
267 		rc = -ENODEV;
268 		goto no_ndlp;
269 	}
270 
271 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
272 	if (!bmp) {
273 		rc = -ENOMEM;
274 		goto free_ndlp;
275 	}
276 
277 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
278 		rc = -ENODEV;
279 		goto free_bmp;
280 	}
281 
282 	cmdiocbq = lpfc_sli_get_iocbq(phba);
283 	if (!cmdiocbq) {
284 		rc = -ENOMEM;
285 		goto free_bmp;
286 	}
287 
288 	cmd = &cmdiocbq->iocb;
289 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
290 	if (!bmp->virt) {
291 		rc = -ENOMEM;
292 		goto free_cmdiocbq;
293 	}
294 
295 	INIT_LIST_HEAD(&bmp->list);
296 	bpl = (struct ulp_bde64 *) bmp->virt;
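	/* Build the buffer pointer list (BPL): one 64-bit BDE per DMA-mapped
	 * scatterlist segment, request (DMA_TO_DEVICE) entries first,
	 * followed by reply (DMA_FROM_DEVICE) entries flagged as input
	 * (BUFF_TYPE_BDE_64I).
	 */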
297 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
298 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
299 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
300 		busaddr = sg_dma_address(sgel);
301 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
302 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
303 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
304 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
305 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
306 		bpl++;
307 	}
308 
309 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
310 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
311 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
312 		busaddr = sg_dma_address(sgel);
313 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
314 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
315 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
316 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
317 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
318 		bpl++;
319 	}
320 
321 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
322 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
323 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
324 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
325 	cmd->un.genreq64.bdl.bdeSize =
326 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
328 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
329 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
330 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
331 	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
332 	cmd->ulpBdeCount = 1;
333 	cmd->ulpLe = 1;
334 	cmd->ulpClass = CLASS3;
335 	cmd->ulpContext = ndlp->nlp_rpi;
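	/* SLI4 hardware addresses RPIs by physical index, so remap the
	 * logical nlp_rpi through the rpi_ids[] table.
	 */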
336 	if (phba->sli_rev == LPFC_SLI_REV4)
337 		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
338 	cmd->ulpOwner = OWN_CHIP;
339 	cmdiocbq->vport = phba->pport;
340 	cmdiocbq->context3 = bmp;
341 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
342 	timeout = phba->fc_ratov * 2;
343 	cmd->ulpTimeout = timeout;
344 
345 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
346 	cmdiocbq->context1 = ndlp;
347 	cmdiocbq->context2 = dd_data;
348 	dd_data->type = TYPE_IOCB;
349 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
350 	dd_data->context_un.iocb.set_job = job;
351 	dd_data->context_un.iocb.bmp = bmp;
352 
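	/* When polling mode keeps FCP ring interrupts disabled, re-enable
	 * them in the Host Control register so completions get delivered.
	 */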
353 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
354 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
355 			rc = -EIO;
356 			goto free_cmdiocbq;
357 		}
358 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
359 		writel(creg_val, phba->HCregaddr);
360 		readl(phba->HCregaddr); /* flush */
361 	}
362 
363 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
364 	if (iocb_stat == IOCB_SUCCESS)
365 		return 0; /* done for now */
366 	else if (iocb_stat == IOCB_BUSY)
367 		rc = -EAGAIN;
368 	else
369 		rc = -EIO;
370 
371 
372 	/* iocb failed so cleanup */
373 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
374 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
376 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 
378 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
379 
380 free_cmdiocbq:
381 	lpfc_sli_release_iocbq(phba, cmdiocbq);
382 free_bmp:
383 	kfree(bmp);
384 free_ndlp:
385 	lpfc_nlp_put(ndlp);
386 no_ndlp:
387 	kfree(dd_data);
388 no_dd_data:
389 	/* make error code available to userspace */
390 	job->reply->result = rc;
391 	job->dd_data = NULL;
392 	return rc;
393 }
394 
395 /**
396  * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
397  * @phba: Pointer to HBA context object.
398  * @cmdiocbq: Pointer to command iocb.
399  * @rspiocbq: Pointer to response iocb.
400  *
401  * This function is the completion handler for iocbs issued using the
402  * lpfc_bsg_rport_els function. This function is called by the
403  * ring event handler function without any lock held. This function
404  * can be called from both worker thread context and interrupt
405  * context. This function can also be called from another thread which
406  * cleans up the SLI layer objects.
407  * This function copies the contents of the response iocb to the
408  * response iocb memory object provided by the caller of
409  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
410  * sleeps for the iocb completion.
411  **/
412 static void
413 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
414 			struct lpfc_iocbq *cmdiocbq,
415 			struct lpfc_iocbq *rspiocbq)
416 {
417 	struct bsg_job_data *dd_data;
418 	struct fc_bsg_job *job;
419 	IOCB_t *rsp;
420 	struct lpfc_nodelist *ndlp;
421 	struct lpfc_dmabuf *pbuflist = NULL;
422 	struct fc_bsg_ctels_reply *els_reply;
423 	uint8_t *rjt_data;
424 	unsigned long flags;
425 	int rc = 0;
426 
427 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
428 	dd_data = cmdiocbq->context1;
429 	/* normal completion and timeout crossed paths, already done */
430 	if (!dd_data) {
431 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
432 		return;
433 	}
434 
435 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
436 	if (cmdiocbq->context2 && rspiocbq)
437 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
438 		       &rspiocbq->iocb, sizeof(IOCB_t));
439 
440 	job = dd_data->context_un.iocb.set_job;
441 	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
442 	rspiocbq = dd_data->context_un.iocb.rspiocbq;
443 	rsp = &rspiocbq->iocb;
444 	ndlp = dd_data->context_un.iocb.ndlp;
445 
446 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
447 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
448 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
449 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
450 
451 	if (job->reply->result == -EAGAIN)
452 		rc = -EAGAIN;
453 	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
454 		job->reply->reply_payload_rcv_len =
455 			rsp->un.elsreq64.bdl.bdeSize;
456 	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
457 		job->reply->reply_payload_rcv_len =
458 			sizeof(struct fc_bsg_ctels_reply);
459 		/* LS_RJT data returned in word 4 */
460 		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
461 		els_reply = &job->reply->reply_data.ctels_reply;
462 		els_reply->status = FC_CTELS_STATUS_REJECT;
463 		els_reply->rjt_data.action = rjt_data[3];
464 		els_reply->rjt_data.reason_code = rjt_data[2];
465 		els_reply->rjt_data.reason_explanation = rjt_data[1];
466 		els_reply->rjt_data.vendor_unique = rjt_data[0];
467 	} else
468 		rc = -EIO;
469 
470 	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
471 	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
472 	lpfc_sli_release_iocbq(phba, rspiocbq);
473 	lpfc_sli_release_iocbq(phba, cmdiocbq);
474 	lpfc_nlp_put(ndlp);
475 	kfree(dd_data);
476 	/* make error code available to userspace */
477 	job->reply->result = rc;
478 	job->dd_data = NULL;
479 	/* complete the job back to userspace */
480 	job->job_done(job);
481 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
482 	return;
483 }
484 
485 /**
486  * lpfc_bsg_rport_els - send an ELS command from a bsg request
487  * @job: fc_bsg_job to handle
488  **/
489 static int
490 lpfc_bsg_rport_els(struct fc_bsg_job *job)
491 {
492 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
493 	struct lpfc_hba *phba = vport->phba;
494 	struct lpfc_rport_data *rdata = job->rport->dd_data;
495 	struct lpfc_nodelist *ndlp = rdata->pnode;
496 	uint32_t elscmd;
497 	uint32_t cmdsize;
498 	uint32_t rspsize;
499 	struct lpfc_iocbq *rspiocbq;
500 	struct lpfc_iocbq *cmdiocbq;
501 	IOCB_t *rsp;
502 	uint16_t rpi = 0;
503 	struct lpfc_dmabuf *pcmd;
504 	struct lpfc_dmabuf *prsp;
505 	struct lpfc_dmabuf *pbuflist = NULL;
506 	struct ulp_bde64 *bpl;
507 	int request_nseg;
508 	int reply_nseg;
509 	struct scatterlist *sgel = NULL;
510 	int numbde;
511 	dma_addr_t busaddr;
512 	struct bsg_job_data *dd_data;
513 	uint32_t creg_val;
514 	int rc = 0;
515 
516 	/* in case no data is transferred */
517 	job->reply->reply_payload_rcv_len = 0;
518 
519 	/* allocate our bsg tracking structure */
520 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
521 	if (!dd_data) {
522 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
523 				"2735 Failed allocation of dd_data\n");
524 		rc = -ENOMEM;
525 		goto no_dd_data;
526 	}
527 
528 	if (!lpfc_nlp_get(ndlp)) {
529 		rc = -ENODEV;
530 		goto free_dd_data;
531 	}
532 
533 	elscmd = job->request->rqst_data.r_els.els_code;
534 	cmdsize = job->request_payload.payload_len;
535 	rspsize = job->reply_payload.payload_len;
536 	rspiocbq = lpfc_sli_get_iocbq(phba);
537 	if (!rspiocbq) {
538 		lpfc_nlp_put(ndlp);
539 		rc = -ENOMEM;
540 		goto free_dd_data;
541 	}
542 
543 	rsp = &rspiocbq->iocb;
544 	rpi = ndlp->nlp_rpi;
545 
546 	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
547 				      ndlp->nlp_DID, elscmd);
548 	if (!cmdiocbq) {
549 		rc = -EIO;
550 		goto free_rspiocbq;
551 	}
552 
553 	/* lpfc_prep_els_iocb() set context1 to the ndlp and context2 to the
554 	 * command dmabuf; context3 holds the buffer-list (BPL) dmabuf
555 	 */
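	/* Free the ELS command/response buffers built by lpfc_prep_els_iocb;
	 * the payloads supplied with the bsg request are used instead.
	 */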
556 	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
557 	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
558 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
559 	kfree(pcmd);
560 	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
561 	kfree(prsp);
562 	cmdiocbq->context2 = NULL;
563 
564 	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
565 	bpl = (struct ulp_bde64 *) pbuflist->virt;
566 
567 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
568 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
569 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
570 		busaddr = sg_dma_address(sgel);
571 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
572 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
573 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
574 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
575 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
576 		bpl++;
577 	}
578 
579 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
580 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
581 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
582 		busaddr = sg_dma_address(sgel);
583 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
584 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
585 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
586 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
587 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
588 		bpl++;
589 	}
590 	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
592 	if (phba->sli_rev == LPFC_SLI_REV4)
593 		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
594 	else
595 		cmdiocbq->iocb.ulpContext = rpi;
596 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
597 	cmdiocbq->context1 = NULL;
598 	cmdiocbq->context2 = NULL;
599 
600 	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601 	cmdiocbq->context1 = dd_data;
602 	cmdiocbq->context_un.ndlp = ndlp;
603 	cmdiocbq->context2 = rspiocbq;
604 	dd_data->type = TYPE_IOCB;
605 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
606 	dd_data->context_un.iocb.rspiocbq = rspiocbq;
607 	dd_data->context_un.iocb.set_job = job;
608 	dd_data->context_un.iocb.bmp = NULL;
609 	dd_data->context_un.iocb.ndlp = ndlp;
610 
611 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
612 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
613 			rc = -EIO;
614 			goto linkdown_err;
615 		}
616 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
617 		writel(creg_val, phba->HCregaddr);
618 		readl(phba->HCregaddr); /* flush */
619 	}
620 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
621 	lpfc_nlp_put(ndlp);
622 	if (rc == IOCB_SUCCESS)
623 		return 0; /* done for now */
624 	else if (rc == IOCB_BUSY)
625 		rc = -EAGAIN;
626 	else
627 		rc = -EIO;
628 
629 linkdown_err:
630 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
631 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
632 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
633 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
634 
635 	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
636 
637 	lpfc_sli_release_iocbq(phba, cmdiocbq);
638 
639 free_rspiocbq:
640 	lpfc_sli_release_iocbq(phba, rspiocbq);
641 
642 free_dd_data:
643 	kfree(dd_data);
644 
645 no_dd_data:
646 	/* make error code available to userspace */
647 	job->reply->result = rc;
648 	job->dd_data = NULL;
649 	return rc;
650 }
651 
652 /**
653  * lpfc_bsg_event_free - frees an allocated event structure
654  * @kref: Pointer to a kref.
655  *
656  * Called from kref_put. Back-casts the kref into an event structure address.
657  * Frees any events to get, deletes associated nodes, frees any events to
658  * see, frees any data, then frees the event itself.
659  **/
660 static void
661 lpfc_bsg_event_free(struct kref *kref)
662 {
663 	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
664 						  kref);
665 	struct event_data *ed;
666 
667 	list_del(&evt->node);
668 
669 	while (!list_empty(&evt->events_to_get)) {
670 		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
671 		list_del(&ed->node);
672 		kfree(ed->data);
673 		kfree(ed);
674 	}
675 
676 	while (!list_empty(&evt->events_to_see)) {
677 		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
678 		list_del(&ed->node);
679 		kfree(ed->data);
680 		kfree(ed);
681 	}
682 
683 	kfree(evt);
684 }
685 
686 /**
687  * lpfc_bsg_event_ref - increments the kref for an event
688  * @evt: Pointer to an event structure.
689  **/
690 static inline void
691 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
692 {
693 	kref_get(&evt->kref);
694 }
695 
696 /**
697  * lpfc_bsg_event_unref - Uses kref_put to free an event structure
698  * @evt: Pointer to an event structure.
699  **/
700 static inline void
701 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
702 {
703 	kref_put(&evt->kref, lpfc_bsg_event_free);
704 }
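
/* Typical usage mirrors the standard kref pattern; e.g. the unsolicited
 * CT event path does:
 *
 *	lpfc_bsg_event_ref(evt);		take a reference
 *	... queue event data, wake any waiter ...
 *	lpfc_bsg_event_unref(evt);		drop it; last put frees evt
 */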
705 
706 /**
707  * lpfc_bsg_event_new - allocate and initialize an event structure
708  * @ev_mask: Mask of events.
709  * @ev_reg_id: Event reg id.
710  * @ev_req_id: Event request id.
711  **/
712 static struct lpfc_bsg_event *
713 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
714 {
715 	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
716 
717 	if (!evt)
718 		return NULL;
719 
720 	INIT_LIST_HEAD(&evt->events_to_get);
721 	INIT_LIST_HEAD(&evt->events_to_see);
722 	evt->type_mask = ev_mask;
723 	evt->req_id = ev_req_id;
724 	evt->reg_id = ev_reg_id;
725 	evt->wait_time_stamp = jiffies;
726 	init_waitqueue_head(&evt->wq);
727 	kref_init(&evt->kref);
728 	return evt;
729 }
730 
731 /**
732  * diag_cmd_data_free - Frees an lpfc dma buffer extension
733  * @phba: Pointer to HBA context object.
734  * @mlist: Pointer to an lpfc dma buffer extension.
735  **/
736 static int
737 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
738 {
739 	struct lpfc_dmabufext *mlast;
740 	struct pci_dev *pcidev;
741 	struct list_head head, *curr, *next;
742 
743 	if ((!mlist) || (!lpfc_is_link_up(phba) &&
744 		(phba->link_flag & LS_LOOPBACK_MODE))) {
745 		return 0;
746 	}
747 
748 	pcidev = phba->pcidev;
749 	list_add_tail(&head, &mlist->dma.list);
750 
751 	list_for_each_safe(curr, next, &head) {
752 		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
753 		if (mlast->dma.virt)
754 			dma_free_coherent(&pcidev->dev,
755 					  mlast->size,
756 					  mlast->dma.virt,
757 					  mlast->dma.phys);
758 		kfree(mlast);
759 	}
760 	return 0;
761 }
762 
763 /**
764  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
765  * @phba: Pointer to HBA context object.
766  * @pring: Pointer to the SLI ring the command was received on.
767  * @piocbq: Pointer to the unsolicited receive iocb.
768  *
769  * This function is called when an unsolicited CT command is received.  It
770  * forwards the event to any processes registered to receive CT events.
771  **/
772 int
773 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
774 			struct lpfc_iocbq *piocbq)
775 {
776 	uint32_t evt_req_id = 0;
777 	uint32_t cmd;
778 	uint32_t len;
779 	struct lpfc_dmabuf *dmabuf = NULL;
780 	struct lpfc_bsg_event *evt;
781 	struct event_data *evt_dat = NULL;
782 	struct lpfc_iocbq *iocbq;
783 	size_t offset = 0;
784 	struct list_head head;
785 	struct ulp_bde64 *bde;
786 	dma_addr_t dma_addr;
787 	int i;
788 	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
789 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
790 	struct lpfc_hbq_entry *hbqe;
791 	struct lpfc_sli_ct_request *ct_req;
792 	struct fc_bsg_job *job = NULL;
793 	unsigned long flags;
794 	int size = 0;
795 
796 	INIT_LIST_HEAD(&head);
797 	list_add_tail(&head, &piocbq->list);
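	/* Splice a local list head into the iocbq chain so the entire
	 * multi-iocbq sequence can be walked below; the head is unlinked
	 * again at error_ct_unsol_exit.
	 */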
798 
799 	if (piocbq->iocb.ulpBdeCount == 0 ||
800 	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
801 		goto error_ct_unsol_exit;
802 
803 	if (phba->link_state == LPFC_HBA_ERROR ||
804 		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
805 		goto error_ct_unsol_exit;
806 
807 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
808 		dmabuf = bdeBuf1;
809 	else {
810 		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
811 				    piocbq->iocb.un.cont64[0].addrLow);
812 		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
813 	}
814 	if (dmabuf == NULL)
815 		goto error_ct_unsol_exit;
816 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
817 	evt_req_id = ct_req->FsType;
818 	cmd = ct_req->CommandResponse.bits.CmdRsp;
819 	len = ct_req->CommandResponse.bits.Size;
820 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
821 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
822 
823 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
824 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
825 		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
826 			evt->req_id != evt_req_id)
827 			continue;
828 
829 		lpfc_bsg_event_ref(evt);
830 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
831 		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
832 		if (evt_dat == NULL) {
833 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
834 			lpfc_bsg_event_unref(evt);
835 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
836 					"2614 Memory allocation failed for "
837 					"CT event\n");
838 			break;
839 		}
840 
841 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
842 			/* take accumulated byte count from the last iocbq */
843 			iocbq = list_entry(head.prev, typeof(*iocbq), list);
844 			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
845 		} else {
846 			list_for_each_entry(iocbq, &head, list) {
847 				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
848 					evt_dat->len +=
849 					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
850 			}
851 		}
852 
853 		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
854 		if (evt_dat->data == NULL) {
855 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
856 					"2615 Memory allocation failed for "
857 					"CT event data, size %d\n",
858 					evt_dat->len);
859 			kfree(evt_dat);
860 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
861 			lpfc_bsg_event_unref(evt);
862 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
863 			goto error_ct_unsol_exit;
864 		}
865 
866 		list_for_each_entry(iocbq, &head, list) {
867 			size = 0;
868 			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
869 				bdeBuf1 = iocbq->context2;
870 				bdeBuf2 = iocbq->context3;
871 			}
872 			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
873 				if (phba->sli3_options &
874 				    LPFC_SLI3_HBQ_ENABLED) {
875 					if (i == 0) {
876 						hbqe = (struct lpfc_hbq_entry *)
877 						  &iocbq->iocb.un.ulpWord[0];
878 						size = hbqe->bde.tus.f.bdeSize;
879 						dmabuf = bdeBuf1;
880 					} else if (i == 1) {
881 						hbqe = (struct lpfc_hbq_entry *)
882 							&iocbq->iocb.unsli3.
883 							sli3Words[4];
884 						size = hbqe->bde.tus.f.bdeSize;
885 						dmabuf = bdeBuf2;
886 					}
887 					if ((offset + size) > evt_dat->len)
888 						size = evt_dat->len - offset;
889 				} else {
890 					size = iocbq->iocb.un.cont64[i].
891 						tus.f.bdeSize;
892 					bde = &iocbq->iocb.un.cont64[i];
893 					dma_addr = getPaddr(bde->addrHigh,
894 							    bde->addrLow);
895 					dmabuf = lpfc_sli_ringpostbuf_get(phba,
896 							pring, dma_addr);
897 				}
898 				if (!dmabuf) {
899 					lpfc_printf_log(phba, KERN_ERR,
900 						LOG_LIBDFC, "2616 No dmabuf "
901 						"found for iocbq 0x%p\n",
902 						iocbq);
903 					kfree(evt_dat->data);
904 					kfree(evt_dat);
905 					spin_lock_irqsave(&phba->ct_ev_lock,
906 						flags);
907 					lpfc_bsg_event_unref(evt);
908 					spin_unlock_irqrestore(
909 						&phba->ct_ev_lock, flags);
910 					goto error_ct_unsol_exit;
911 				}
912 				memcpy((char *)(evt_dat->data) + offset,
913 				       dmabuf->virt, size);
914 				offset += size;
915 				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
916 				    !(phba->sli3_options &
917 				      LPFC_SLI3_HBQ_ENABLED)) {
918 					lpfc_sli_ringpostbuf_put(phba, pring,
919 								 dmabuf);
920 				} else {
921 					switch (cmd) {
922 					case ELX_LOOPBACK_DATA:
923 						if (phba->sli_rev <
924 						    LPFC_SLI_REV4)
925 							diag_cmd_data_free(phba,
926 							(struct lpfc_dmabufext
927 							 *)dmabuf);
928 						break;
929 					case ELX_LOOPBACK_XRI_SETUP:
930 						if ((phba->sli_rev ==
931 							LPFC_SLI_REV2) ||
932 							(phba->sli3_options &
933 							LPFC_SLI3_HBQ_ENABLED
934 							)) {
935 							lpfc_in_buf_free(phba,
936 									dmabuf);
937 						} else {
938 							lpfc_post_buffer(phba,
939 									 pring,
940 									 1);
941 						}
942 						break;
943 					default:
944 						if (!(phba->sli3_options &
945 						      LPFC_SLI3_HBQ_ENABLED))
946 							lpfc_post_buffer(phba,
947 									 pring,
948 									 1);
949 						break;
950 					}
951 				}
952 			}
953 		}
954 
955 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
956 		if (phba->sli_rev == LPFC_SLI_REV4) {
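			/* Stash the exchange identifiers in the ct_ctx[]
			 * ring so a later SEND_MGMT_RESP can transmit its
			 * response on this exchange; immed_dat carries the
			 * array index (tag) back to the application.
			 */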
957 			evt_dat->immed_dat = phba->ctx_idx;
958 			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
959 			/* Provide warning for over-run of the ct_ctx array */
960 			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
961 			    UNSOL_VALID)
962 				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
963 						"2717 CT context array entry "
964 						"[%d] over-run: oxid:x%x, "
965 						"sid:x%x\n", phba->ctx_idx,
966 						phba->ct_ctx[
967 						    evt_dat->immed_dat].oxid,
968 						phba->ct_ctx[
969 						    evt_dat->immed_dat].SID);
970 			phba->ct_ctx[evt_dat->immed_dat].rxid =
971 				piocbq->iocb.ulpContext;
972 			phba->ct_ctx[evt_dat->immed_dat].oxid =
973 				piocbq->iocb.unsli3.rcvsli3.ox_id;
974 			phba->ct_ctx[evt_dat->immed_dat].SID =
975 				piocbq->iocb.un.rcvels.remoteID;
976 			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
977 		} else
978 			evt_dat->immed_dat = piocbq->iocb.ulpContext;
979 
980 		evt_dat->type = FC_REG_CT_EVENT;
981 		list_add(&evt_dat->node, &evt->events_to_see);
982 		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
983 			wake_up_interruptible(&evt->wq);
984 			lpfc_bsg_event_unref(evt);
985 			break;
986 		}
987 
988 		list_move(evt->events_to_see.prev, &evt->events_to_get);
989 		lpfc_bsg_event_unref(evt);
990 
991 		job = evt->set_job;
992 		evt->set_job = NULL;
993 		if (job) {
994 			job->reply->reply_payload_rcv_len = size;
995 			/* make error code available to userspace */
996 			job->reply->result = 0;
997 			job->dd_data = NULL;
998 			/* complete the job back to userspace */
999 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1000 			job->job_done(job);
1001 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
1002 		}
1003 	}
1004 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1005 
1006 error_ct_unsol_exit:
1007 	if (!list_empty(&head))
1008 		list_del(&head);
1009 	if ((phba->sli_rev < LPFC_SLI_REV4) &&
1010 	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
1011 		return 0;
1012 	return 1;
1013 }
1014 
1015 /**
1016  * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
1017  * @phba: Pointer to HBA context object.
1018  * @dmabuf: pointer to a dmabuf that describes the FC sequence
1019  *
1020  * This function handles an abort of a CT command sent toward the management
1021  * plane for an SLI4 port.
1022  *
1023  * If a pending context of a CT command to the management plane is present,
1024  * it clears that context and returns 1 for handled; otherwise, it returns 0,
1025  * indicating no such context exists.
1026  **/
1027 int
1028 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1029 {
1030 	struct fc_frame_header fc_hdr;
1031 	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1032 	int ctx_idx, handled = 0;
1033 	uint16_t oxid, rxid;
1034 	uint32_t sid;
1035 
1036 	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1037 	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1038 	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1039 	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1040 
1041 	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1042 		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1043 			continue;
1044 		if (phba->ct_ctx[ctx_idx].rxid != rxid)
1045 			continue;
1046 		if (phba->ct_ctx[ctx_idx].oxid != oxid)
1047 			continue;
1048 		if (phba->ct_ctx[ctx_idx].SID != sid)
1049 			continue;
1050 		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1051 		handled = 1;
1052 	}
1053 	return handled;
1054 }
1055 
1056 /**
1057  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1058  * @job: SET_EVENT fc_bsg_job
1059  **/
1060 static int
1061 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1062 {
1063 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1064 	struct lpfc_hba *phba = vport->phba;
1065 	struct set_ct_event *event_req;
1066 	struct lpfc_bsg_event *evt;
1067 	int rc = 0;
1068 	struct bsg_job_data *dd_data = NULL;
1069 	uint32_t ev_mask;
1070 	unsigned long flags;
1071 
1072 	if (job->request_len <
1073 	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1074 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1075 				"2612 Received SET_CT_EVENT below minimum "
1076 				"size\n");
1077 		rc = -EINVAL;
1078 		goto job_error;
1079 	}
1080 
1081 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1082 	if (dd_data == NULL) {
1083 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1084 				"2734 Failed allocation of dd_data\n");
1085 		rc = -ENOMEM;
1086 		goto job_error;
1087 	}
1088 
1089 	event_req = (struct set_ct_event *)
1090 		job->request->rqst_data.h_vendor.vendor_cmd;
1091 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1092 				FC_REG_EVENT_MASK);
1093 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1094 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1095 		if (evt->reg_id == event_req->ev_reg_id) {
1096 			lpfc_bsg_event_ref(evt);
1097 			evt->wait_time_stamp = jiffies;
1098 			break;
1099 		}
1100 	}
1101 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1102 
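	/* If the search loop ran to completion, evt->node now aliases the
	 * list head itself, i.e. no matching registration was found.
	 */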
1103 	if (&evt->node == &phba->ct_ev_waiters) {
1104 		/* no event waiting struct yet - first call */
1105 		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1106 					event_req->ev_req_id);
1107 		if (!evt) {
1108 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1109 					"2617 Failed allocation of event "
1110 					"waiter\n");
1111 			rc = -ENOMEM;
1112 			goto job_error;
1113 		}
1114 
1115 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1116 		list_add(&evt->node, &phba->ct_ev_waiters);
1117 		lpfc_bsg_event_ref(evt);
1118 		evt->wait_time_stamp = jiffies;
1119 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1120 	}
1121 
1122 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1123 	evt->waiting = 1;
1124 	dd_data->type = TYPE_EVT;
1125 	dd_data->context_un.evt = evt;
1126 	evt->set_job = job; /* for unsolicited command */
1127 	job->dd_data = dd_data; /* for fc transport timeout callback*/
1128 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1129 	return 0; /* call job done later */
1130 
1131 job_error:
1132 	if (dd_data != NULL)
1133 		kfree(dd_data);
1134 
1135 	job->dd_data = NULL;
1136 	return rc;
1137 }
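
/*
 * Note: SET_EVENT and GET_EVENT work as a pair.  An application first
 * registers interest with SET_EVENT (that job only completes when a
 * matching unsolicited CT event arrives), then drains the queued event
 * data with repeated GET_EVENT requests until -ENOENT is returned.
 */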
1138 
1139 /**
1140  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1141  * @job: GET_EVENT fc_bsg_job
1142  **/
1143 static int
1144 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1145 {
1146 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1147 	struct lpfc_hba *phba = vport->phba;
1148 	struct get_ct_event *event_req;
1149 	struct get_ct_event_reply *event_reply;
1150 	struct lpfc_bsg_event *evt;
1151 	struct event_data *evt_dat = NULL;
1152 	unsigned long flags;
1153 	uint32_t rc = 0;
1154 
1155 	if (job->request_len <
1156 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1157 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1158 				"2613 Received GET_CT_EVENT request below "
1159 				"minimum size\n");
1160 		rc = -EINVAL;
1161 		goto job_error;
1162 	}
1163 
1164 	event_req = (struct get_ct_event *)
1165 		job->request->rqst_data.h_vendor.vendor_cmd;
1166 
1167 	event_reply = (struct get_ct_event_reply *)
1168 		job->reply->reply_data.vendor_reply.vendor_rsp;
1169 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1170 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1171 		if (evt->reg_id == event_req->ev_reg_id) {
1172 			if (list_empty(&evt->events_to_get))
1173 				break;
1174 			lpfc_bsg_event_ref(evt);
1175 			evt->wait_time_stamp = jiffies;
1176 			evt_dat = list_entry(evt->events_to_get.prev,
1177 					     struct event_data, node);
1178 			list_del(&evt_dat->node);
1179 			break;
1180 		}
1181 	}
1182 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1183 
1184 	/* The app may continue to ask for event data until it gets
1185 	 * an error indicating that there isn't any more
1186 	 */
1187 	if (evt_dat == NULL) {
1188 		job->reply->reply_payload_rcv_len = 0;
1189 		rc = -ENOENT;
1190 		goto job_error;
1191 	}
1192 
1193 	if (evt_dat->len > job->request_payload.payload_len) {
1194 		evt_dat->len = job->request_payload.payload_len;
1195 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1196 				"2618 Truncated event data at %d "
1197 				"bytes\n",
1198 				job->request_payload.payload_len);
1199 	}
1200 
1201 	event_reply->type = evt_dat->type;
1202 	event_reply->immed_data = evt_dat->immed_dat;
1203 	if (evt_dat->len > 0)
1204 		job->reply->reply_payload_rcv_len =
1205 			sg_copy_from_buffer(job->request_payload.sg_list,
1206 					    job->request_payload.sg_cnt,
1207 					    evt_dat->data, evt_dat->len);
1208 	else
1209 		job->reply->reply_payload_rcv_len = 0;
1210 
1211 	if (evt_dat) {
1212 		kfree(evt_dat->data);
1213 		kfree(evt_dat);
1214 	}
1215 
1216 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1217 	lpfc_bsg_event_unref(evt);
1218 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1219 	job->dd_data = NULL;
1220 	job->reply->result = 0;
1221 	job->job_done(job);
1222 	return 0;
1223 
1224 job_error:
1225 	job->dd_data = NULL;
1226 	job->reply->result = rc;
1227 	return rc;
1228 }
1229 
1230 /**
1231  * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1232  * @phba: Pointer to HBA context object.
1233  * @cmdiocbq: Pointer to command iocb.
1234  * @rspiocbq: Pointer to response iocb.
1235  *
1236  * This function is the completion handler for iocbs issued using the
1237  * lpfc_issue_ct_rsp function. This function is called by the
1238  * ring event handler function without any lock held. This function
1239  * can be called from both worker thread context and interrupt
1240  * context. This function can also be called from another thread which
1241  * cleans up the SLI layer objects.
1242  * This function copies the contents of the response iocb to the
1243  * response iocb memory object provided by the caller of
1244  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1245  * sleeps for the iocb completion.
1246  **/
1247 static void
1248 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1249 			struct lpfc_iocbq *cmdiocbq,
1250 			struct lpfc_iocbq *rspiocbq)
1251 {
1252 	struct bsg_job_data *dd_data;
1253 	struct fc_bsg_job *job;
1254 	IOCB_t *rsp;
1255 	struct lpfc_dmabuf *bmp;
1256 	struct lpfc_nodelist *ndlp;
1257 	unsigned long flags;
1258 	int rc = 0;
1259 
1260 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1261 	dd_data = cmdiocbq->context2;
1262 	/* normal completion and timeout crossed paths, already done */
1263 	if (!dd_data) {
1264 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1265 		return;
1266 	}
1267 
1268 	job = dd_data->context_un.iocb.set_job;
1269 	bmp = dd_data->context_un.iocb.bmp;
1270 	rsp = &rspiocbq->iocb;
1271 	ndlp = dd_data->context_un.iocb.ndlp;
1272 
1273 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1274 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
1275 
1276 	if (rsp->ulpStatus) {
1277 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1278 			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1279 			case IOERR_SEQUENCE_TIMEOUT:
1280 				rc = -ETIMEDOUT;
1281 				break;
1282 			case IOERR_INVALID_RPI:
1283 				rc = -EFAULT;
1284 				break;
1285 			default:
1286 				rc = -EACCES;
1287 				break;
1288 			}
1289 		} else
1290 			rc = -EACCES;
1291 	} else
1292 		job->reply->reply_payload_rcv_len =
1293 			rsp->un.genreq64.bdl.bdeSize;
1294 
1295 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1296 	lpfc_sli_release_iocbq(phba, cmdiocbq);
1297 	lpfc_nlp_put(ndlp);
1298 	kfree(bmp);
1299 	kfree(dd_data);
1300 	/* make error code available to userspace */
1301 	job->reply->result = rc;
1302 	job->dd_data = NULL;
1303 	/* complete the job back to userspace */
1304 	job->job_done(job);
1305 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1306 	return;
1307 }
1308 
1309 /**
1310  * lpfc_issue_ct_rsp - issue a ct response
1311  * @phba: Pointer to HBA context object.
1312  * @job: Pointer to the job object.
1313  * @tag: tag index value into the ports context exchange array.
1314  * @bmp: Pointer to a dma buffer descriptor.
1315  * @num_entry: Number of entries in the BDE list.
1316  **/
1317 static int
1318 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1319 		  struct lpfc_dmabuf *bmp, int num_entry)
1320 {
1321 	IOCB_t *icmd;
1322 	struct lpfc_iocbq *ctiocb = NULL;
1323 	int rc = 0;
1324 	struct lpfc_nodelist *ndlp = NULL;
1325 	struct bsg_job_data *dd_data;
1326 	uint32_t creg_val;
1327 
1328 	/* allocate our bsg tracking structure */
1329 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1330 	if (!dd_data) {
1331 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1332 				"2736 Failed allocation of dd_data\n");
1333 		rc = -ENOMEM;
1334 		goto no_dd_data;
1335 	}
1336 
1337 	/* Allocate buffer for command iocb */
1338 	ctiocb = lpfc_sli_get_iocbq(phba);
1339 	if (!ctiocb) {
1340 		rc = -ENOMEM;
1341 		goto no_ctiocb;
1342 	}
1343 
1344 	icmd = &ctiocb->iocb;
1345 	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1346 	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1347 	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1348 	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1349 	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1350 	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1351 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1352 	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1353 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1354 
1355 	/* Fill in rest of iocb */
1356 	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1357 	icmd->ulpBdeCount = 1;
1358 	icmd->ulpLe = 1;
1359 	icmd->ulpClass = CLASS3;
1360 	if (phba->sli_rev == LPFC_SLI_REV4) {
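		/* For SLI4, respond on the exchange captured at unsolicited
		 * receive time: the tag indexes the ct_ctx[] entry holding
		 * the original rxid/oxid/SID.
		 */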
1361 		/* Do not issue unsol response if oxid not marked as valid */
1362 		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1363 			rc = IOCB_ERROR;
1364 			goto issue_ct_rsp_exit;
1365 		}
1366 		icmd->ulpContext = phba->ct_ctx[tag].rxid;
1367 		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1368 		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1369 		if (!ndlp) {
1370 			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1371 				 "2721 ndlp null for oxid %x SID %x\n",
1372 					icmd->ulpContext,
1373 					phba->ct_ctx[tag].SID);
1374 			rc = IOCB_ERROR;
1375 			goto issue_ct_rsp_exit;
1376 		}
1377 
1378 		/* Check if the ndlp is active */
1379 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1380 			rc = -IOCB_ERROR;
1381 			goto issue_ct_rsp_exit;
1382 		}
1383 
1384 		/* get a reference count so the ndlp doesn't go away while
1385 		 * we respond
1386 		 */
1387 		if (!lpfc_nlp_get(ndlp)) {
1388 			rc = -IOCB_ERROR;
1389 			goto issue_ct_rsp_exit;
1390 		}
1391 
1392 		icmd->un.ulpWord[3] =
1393 				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1394 
1395 		/* The exchange is done, mark the entry as invalid */
1396 		phba->ct_ctx[tag].valid = UNSOL_INVALID;
1397 	} else
1398 		icmd->ulpContext = (ushort) tag;
1399 
1400 	icmd->ulpTimeout = phba->fc_ratov * 2;
1401 
1402 	/* Xmit CT response on exchange <xid> */
1403 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1404 		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1405 		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1406 
1407 	ctiocb->iocb_cmpl = NULL;
1408 	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1409 	ctiocb->vport = phba->pport;
1410 	ctiocb->context3 = bmp;
1411 
1412 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1413 	ctiocb->context2 = dd_data;
1414 	ctiocb->context1 = ndlp;
1415 	dd_data->type = TYPE_IOCB;
1416 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
1417 	dd_data->context_un.iocb.rspiocbq = NULL;
1418 	dd_data->context_un.iocb.set_job = job;
1419 	dd_data->context_un.iocb.bmp = bmp;
1420 	dd_data->context_un.iocb.ndlp = ndlp;
1421 
1422 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1423 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1424 			rc = -IOCB_ERROR;
1425 			goto issue_ct_rsp_exit;
1426 		}
1427 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1428 		writel(creg_val, phba->HCregaddr);
1429 		readl(phba->HCregaddr); /* flush */
1430 	}
1431 
1432 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1433 
1434 	if (rc == IOCB_SUCCESS)
1435 		return 0; /* done for now */
1436 
1437 issue_ct_rsp_exit:
1438 	lpfc_sli_release_iocbq(phba, ctiocb);
1439 no_ctiocb:
1440 	kfree(dd_data);
1441 no_dd_data:
1442 	return rc;
1443 }
1444 
1445 /**
1446  * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1447  * @job: SEND_MGMT_RESP fc_bsg_job
1448  **/
1449 static int
1450 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1451 {
1452 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1453 	struct lpfc_hba *phba = vport->phba;
1454 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1455 		job->request->rqst_data.h_vendor.vendor_cmd;
1456 	struct ulp_bde64 *bpl;
1457 	struct lpfc_dmabuf *bmp = NULL;
1458 	struct scatterlist *sgel = NULL;
1459 	int request_nseg;
1460 	int numbde;
1461 	dma_addr_t busaddr;
1462 	uint32_t tag = mgmt_resp->tag;
1463 	unsigned long reqbfrcnt =
1464 			(unsigned long)job->request_payload.payload_len;
1465 	int rc = 0;
1466 
1467 	/* in case no data is transferred */
1468 	job->reply->reply_payload_rcv_len = 0;
1469 
1470 	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1471 		rc = -ERANGE;
1472 		goto send_mgmt_rsp_exit;
1473 	}
1474 
1475 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1476 	if (!bmp) {
1477 		rc = -ENOMEM;
1478 		goto send_mgmt_rsp_exit;
1479 	}
1480 
1481 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1482 	if (!bmp->virt) {
1483 		rc = -ENOMEM;
1484 		goto send_mgmt_rsp_free_bmp;
1485 	}
1486 
1487 	INIT_LIST_HEAD(&bmp->list);
1488 	bpl = (struct ulp_bde64 *) bmp->virt;
1489 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1490 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1491 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1492 		busaddr = sg_dma_address(sgel);
1493 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1494 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
1495 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
1496 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1497 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1498 		bpl++;
1499 	}
1500 
1501 	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1502 
1503 	if (rc == IOCB_SUCCESS)
1504 		return 0; /* done for now */
1505 
1506 	/* TBD need to handle a timeout */
1507 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1508 			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1509 	rc = -EACCES;
1510 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1511 
1512 send_mgmt_rsp_free_bmp:
1513 	kfree(bmp);
1514 send_mgmt_rsp_exit:
1515 	/* make error code available to userspace */
1516 	job->reply->result = rc;
1517 	job->dd_data = NULL;
1518 	return rc;
1519 }
1520 
1521 /**
1522  * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
1523  * @phba: Pointer to HBA context object.
1524  *
1525  * This function is responsible for preparing the driver for diag loopback
1526  * mode on the device.
1527  */
1528 static int
1529 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1530 {
1531 	struct lpfc_vport **vports;
1532 	struct Scsi_Host *shost;
1533 	struct lpfc_sli *psli;
1534 	struct lpfc_sli_ring *pring;
1535 	int i = 0;
1536 
1537 	psli = &phba->sli;
1538 	if (!psli)
1539 		return -ENODEV;
1540 
1541 	pring = &psli->ring[LPFC_FCP_RING];
1542 	if (!pring)
1543 		return -ENODEV;
1544 
1545 	if ((phba->link_state == LPFC_HBA_ERROR) ||
1546 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1547 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1548 		return -EACCES;
1549 
1550 	vports = lpfc_create_vport_work_array(phba);
1551 	if (vports) {
1552 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1553 			shost = lpfc_shost_from_vport(vports[i]);
1554 			scsi_block_requests(shost);
1555 		}
1556 		lpfc_destroy_vport_work_array(phba, vports);
1557 	} else {
1558 		shost = lpfc_shost_from_vport(phba->pport);
1559 		scsi_block_requests(shost);
1560 	}
1561 
1562 	while (pring->txcmplq_cnt) {
1563 		if (i++ > 500)  /* wait up to 5 seconds */
1564 			break;
1565 		msleep(10);
1566 	}
1567 	return 0;
1568 }
1569 
1570 /**
1571  * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1572  * @phba: Pointer to HBA context object.
1573  *
1574  * This function is responsible for the driver's exit processing after
1575  * setting up diag loopback mode on the device.
1576  */
1577 static void
1578 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1579 {
1580 	struct Scsi_Host *shost;
1581 	struct lpfc_vport **vports;
1582 	int i;
1583 
1584 	vports = lpfc_create_vport_work_array(phba);
1585 	if (vports) {
1586 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1587 			shost = lpfc_shost_from_vport(vports[i]);
1588 			scsi_unblock_requests(shost);
1589 		}
1590 		lpfc_destroy_vport_work_array(phba, vports);
1591 	} else {
1592 		shost = lpfc_shost_from_vport(phba->pport);
1593 		scsi_unblock_requests(shost);
1594 	}
1595 	return;
1596 }
1597 
1598 /**
1599  * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1600  * @phba: Pointer to HBA context object.
1601  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1602  *
1603  * This function is responsible for placing an sli3 port into diagnostic
1604  * loopback mode in order to perform a diagnostic loopback test.
1605  * All new scsi requests are blocked, a small delay is used to allow the
1606  * scsi requests to complete, then the link is brought down. Once the link
1607  * is placed in loopback mode, scsi requests are again allowed
1608  * so the scsi mid-layer doesn't give up on the port.
1609  * All of this is done in-line.
1610  */
1611 static int
1612 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1613 {
1614 	struct diag_mode_set *loopback_mode;
1615 	uint32_t link_flags;
1616 	uint32_t timeout;
1617 	LPFC_MBOXQ_t *pmboxq  = NULL;
1618 	int mbxstatus = MBX_SUCCESS;
1619 	int i = 0;
1620 	int rc = 0;
1621 
1622 	/* no data to return just the return code */
1623 	job->reply->reply_payload_rcv_len = 0;
1624 
1625 	if (job->request_len < sizeof(struct fc_bsg_request) +
1626 	    sizeof(struct diag_mode_set)) {
1627 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1628 				"2738 Received DIAG MODE request size:%d "
1629 				"below the minimum size:%d\n",
1630 				job->request_len,
1631 				(int)(sizeof(struct fc_bsg_request) +
1632 				sizeof(struct diag_mode_set)));
1633 		rc = -EINVAL;
1634 		goto job_error;
1635 	}
1636 
1637 	rc = lpfc_bsg_diag_mode_enter(phba);
1638 	if (rc)
1639 		goto job_error;
1640 
1641 	/* bring the link to diagnostic mode */
1642 	loopback_mode = (struct diag_mode_set *)
1643 		job->request->rqst_data.h_vendor.vendor_cmd;
1644 	link_flags = loopback_mode->type;
1645 	timeout = loopback_mode->timeout * 100;
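	/* timeout is supplied in seconds; the * 100 converts it to the
	 * 10 ms polling ticks used by the wait loops below.
	 */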
1646 
1647 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1648 	if (!pmboxq) {
1649 		rc = -ENOMEM;
1650 		goto loopback_mode_exit;
1651 	}
1652 	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1653 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1654 	pmboxq->u.mb.mbxOwner = OWN_HOST;
1655 
1656 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1657 
1658 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1659 		/* wait for link down before proceeding */
1660 		i = 0;
1661 		while (phba->link_state != LPFC_LINK_DOWN) {
1662 			if (i++ > timeout) {
1663 				rc = -ETIMEDOUT;
1664 				goto loopback_mode_exit;
1665 			}
1666 			msleep(10);
1667 		}
1668 
1669 		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1670 		if (link_flags == INTERNAL_LOOP_BACK)
1671 			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1672 		else
1673 			pmboxq->u.mb.un.varInitLnk.link_flags =
1674 				FLAGS_TOPOLOGY_MODE_LOOP;
1675 
1676 		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1677 		pmboxq->u.mb.mbxOwner = OWN_HOST;
1678 
1679 		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1680 						     LPFC_MBOX_TMO);
1681 
1682 		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1683 			rc = -ENODEV;
1684 		else {
1685 			spin_lock_irq(&phba->hbalock);
1686 			phba->link_flag |= LS_LOOPBACK_MODE;
1687 			spin_unlock_irq(&phba->hbalock);
1688 			/* wait for the link attention interrupt */
1689 			msleep(100);
1690 
1691 			i = 0;
1692 			while (phba->link_state != LPFC_HBA_READY) {
1693 				if (i++ > timeout) {
1694 					rc = -ETIMEDOUT;
1695 					break;
1696 				}
1697 
1698 				msleep(10);
1699 			}
1700 		}
1701 
1702 	} else
1703 		rc = -ENODEV;
1704 
1705 loopback_mode_exit:
1706 	lpfc_bsg_diag_mode_exit(phba);
1707 
1708 	/*
1709 	 * Let SLI layer release mboxq if mbox command completed after timeout.
1710 	 */
1711 	if (pmboxq && mbxstatus != MBX_TIMEOUT)
1712 		mempool_free(pmboxq, phba->mbox_mem_pool);
1713 
1714 job_error:
1715 	/* make error code available to userspace */
1716 	job->reply->result = rc;
1717 	/* complete the job back to userspace if no error */
1718 	if (rc == 0)
1719 		job->job_done(job);
1720 	return rc;
1721 }
1722 
1723 /**
1724  * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1725  * @phba: Pointer to HBA context object.
1726  * @diag: Flag to set the link to diag or normal operation state.
1727  *
1728  * This function is responsible for issuing a sli4 mailbox command for setting
1729  * link to either diag state or normal operation state.
1730  */
1731 static int
1732 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1733 {
1734 	LPFC_MBOXQ_t *pmboxq;
1735 	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1736 	uint32_t req_len, alloc_len;
1737 	int mbxstatus = MBX_SUCCESS, rc;
1738 
1739 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 	if (!pmboxq)
1741 		return -ENOMEM;
1742 
1743 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1744 		   sizeof(struct lpfc_sli4_cfg_mhdr));
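	/* Payload length excludes the common SLI4 config header, which
	 * lpfc_sli4_config() accounts for on its own.
	 */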
1745 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1746 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1747 				req_len, LPFC_SLI4_MBX_EMBED);
1748 	if (alloc_len != req_len) {
1749 		rc = -ENOMEM;
1750 		goto link_diag_state_set_out;
1751 	}
1752 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1753 			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1754 			diag, phba->sli4_hba.lnk_info.lnk_tp,
1755 			phba->sli4_hba.lnk_info.lnk_no);
1756 
1757 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1758 	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1759 	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1760 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1761 	       phba->sli4_hba.lnk_info.lnk_no);
1762 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1763 	       phba->sli4_hba.lnk_info.lnk_tp);
1764 	bf_set(lpfc_mbx_set_diag_state_diag, &link_diag_state->u.req,
1765 	       diag ? 1 : 0);
1770 
1771 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1772 
1773 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1774 		rc = 0;
1775 	else
1776 		rc = -ENODEV;
1777 
1778 link_diag_state_set_out:
1779 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1780 		mempool_free(pmboxq, phba->mbox_mem_pool);
1781 
1782 	return rc;
1783 }
1784 
1785 /**
1786  * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
1787  * @phba: Pointer to HBA context object.
1788  *
1789  * This function is responsible for issuing a sli4 mailbox command for setting
1790  * up internal loopback diagnostic.
1791  */
1792 static int
1793 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1794 {
1795 	LPFC_MBOXQ_t *pmboxq;
1796 	uint32_t req_len, alloc_len;
1797 	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1798 	int mbxstatus = MBX_SUCCESS, rc = 0;
1799 
1800 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1801 	if (!pmboxq)
1802 		return -ENOMEM;
1803 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1804 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1805 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1806 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1807 				req_len, LPFC_SLI4_MBX_EMBED);
1808 	if (alloc_len != req_len) {
1809 		mempool_free(pmboxq, phba->mbox_mem_pool);
1810 		return -ENOMEM;
1811 	}
1812 	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1813 	bf_set(lpfc_mbx_set_diag_state_link_num,
1814 	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1815 	bf_set(lpfc_mbx_set_diag_state_link_type,
1816 	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1817 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1818 	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1819 
1820 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1821 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1822 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1823 				"3127 Failed setup loopback mode mailbox "
1824 				"command, rc:x%x, status:x%x\n", mbxstatus,
1825 				pmboxq->u.mb.mbxStatus);
1826 		rc = -ENODEV;
1827 	}
1828 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1829 		mempool_free(pmboxq, phba->mbox_mem_pool);
1830 	return rc;
1831 }
1832 
1833 /**
1834  * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1835  * @phba: Pointer to HBA context object.
1836  *
1837  * This function sets up the SLI4 FC port registrations for a diagnostic
1838  * run, which includes all the rpis, the vfi, and also the vpi.
1839  */
1840 static int
1841 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1842 {
1843 	int rc;
1844 
1845 	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1846 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1847 				"3136 Port still had vfi registered: "
1848 				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1849 				phba->pport->fc_myDID, phba->fcf.fcfi,
1850 				phba->sli4_hba.vfi_ids[phba->pport->vfi],
1851 				phba->vpi_ids[phba->pport->vpi]);
1852 		return -EINVAL;
1853 	}
1854 	rc = lpfc_issue_reg_vfi(phba->pport);
1855 	return rc;
1856 }
1857 
1858 /**
1859  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1860  * @phba: Pointer to HBA context object.
1861  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1862  *
1863  * This function is responsible for placing an sli4 port into diagnostic
1864  * loopback mode in order to perform a diagnostic loopback test.
1865  */
1866 static int
1867 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1868 {
1869 	struct diag_mode_set *loopback_mode;
1870 	uint32_t link_flags, timeout;
1871 	int i, rc = 0;
1872 
1873 	/* no data to return, just the return code */
1874 	job->reply->reply_payload_rcv_len = 0;
1875 
1876 	if (job->request_len < sizeof(struct fc_bsg_request) +
1877 	    sizeof(struct diag_mode_set)) {
1878 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1879 				"3011 Received DIAG MODE request size:%d "
1880 				"below the minimum size:%d\n",
1881 				job->request_len,
1882 				(int)(sizeof(struct fc_bsg_request) +
1883 				sizeof(struct diag_mode_set)));
1884 		rc = -EINVAL;
1885 		goto job_error;
1886 	}
1887 
1888 	rc = lpfc_bsg_diag_mode_enter(phba);
1889 	if (rc)
1890 		goto job_error;
1891 
1892 	/* indicate we are in loopback diagnostic mode */
1893 	spin_lock_irq(&phba->hbalock);
1894 	phba->link_flag |= LS_LOOPBACK_MODE;
1895 	spin_unlock_irq(&phba->hbalock);
1896 
1897 	/* reset port to start from scratch */
1898 	rc = lpfc_selective_reset(phba);
1899 	if (rc)
1900 		goto job_error;
1901 
1902 	/* bring the link to diagnostic mode */
1903 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1904 			"3129 Bring link to diagnostic state.\n");
1905 	loopback_mode = (struct diag_mode_set *)
1906 		job->request->rqst_data.h_vendor.vendor_cmd;
1907 	link_flags = loopback_mode->type;
1908 	timeout = loopback_mode->timeout * 100;
1909 
1910 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1911 	if (rc) {
1912 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1913 				"3130 Failed to bring link to diagnostic "
1914 				"state, rc:x%x\n", rc);
1915 		goto loopback_mode_exit;
1916 	}
1917 
1918 	/* wait for link down before proceeding */
1919 	i = 0;
1920 	while (phba->link_state != LPFC_LINK_DOWN) {
1921 		if (i++ > timeout) {
1922 			rc = -ETIMEDOUT;
1923 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1924 					"3131 Timeout waiting for link to "
1925 					"diagnostic mode, timeout:%d ms\n",
1926 					timeout * 10);
1927 			goto loopback_mode_exit;
1928 		}
1929 		msleep(10);
1930 	}
1931 
1932 	/* set up loopback mode */
1933 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1934 			"3132 Set up loopback mode:x%x\n", link_flags);
1935 
1936 	if (link_flags == INTERNAL_LOOP_BACK)
1937 		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
1938 	else if (link_flags == EXTERNAL_LOOP_BACK)
1939 		rc = lpfc_hba_init_link_fc_topology(phba,
1940 						    FLAGS_TOPOLOGY_MODE_PT_PT,
1941 						    MBX_NOWAIT);
1942 	else {
1943 		rc = -EINVAL;
1944 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
1945 				"3141 Loopback mode:x%x not supported\n",
1946 				link_flags);
1947 		goto loopback_mode_exit;
1948 	}
1949 
1950 	if (!rc) {
1951 		/* wait for the link attention interrupt */
1952 		msleep(100);
1953 		i = 0;
1954 		while (phba->link_state < LPFC_LINK_UP) {
1955 			if (i++ > timeout) {
1956 				rc = -ETIMEDOUT;
1957 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1958 					"3137 Timeout waiting for link up "
1959 					"in loopback mode, timeout:%d ms\n",
1960 					timeout * 10);
1961 				break;
1962 			}
1963 			msleep(10);
1964 		}
1965 	}
1966 
1967 	/* port resource registration setup for loopback diagnostic */
1968 	if (!rc) {
1969 		/* set up a non-zero myDID for the loopback test */
1970 		phba->pport->fc_myDID = 1;
1971 		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
1972 	} else
1973 		goto loopback_mode_exit;
1974 
1975 	if (!rc) {
1976 		/* wait for the port ready */
1977 		msleep(100);
1978 		i = 0;
1979 		while (phba->link_state != LPFC_HBA_READY) {
1980 			if (i++ > timeout) {
1981 				rc = -ETIMEDOUT;
1982 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1983 					"3133 Timeout waiting for port "
1984 					"loopback mode ready, timeout:%d ms\n",
1985 					timeout * 10);
1986 				break;
1987 			}
1988 			msleep(10);
1989 		}
1990 	}
1991 
1992 loopback_mode_exit:
1993 	/* clear loopback diagnostic mode */
1994 	if (rc) {
1995 		spin_lock_irq(&phba->hbalock);
1996 		phba->link_flag &= ~LS_LOOPBACK_MODE;
1997 		spin_unlock_irq(&phba->hbalock);
1998 	}
1999 	lpfc_bsg_diag_mode_exit(phba);
2000 
2001 job_error:
2002 	/* make error code available to userspace */
2003 	job->reply->result = rc;
2004 	/* complete the job back to userspace if no error */
2005 	if (rc == 0)
2006 		job->job_done(job);
2007 	return rc;
2008 }
2009 
2010 /**
2011  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2012  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2013  *
2014  * This function is responsible for checking and dispatching the bsg diag
2015  * command from the user to the proper driver action routine.
2016  */
2017 static int
2018 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
2019 {
2020 	struct Scsi_Host *shost;
2021 	struct lpfc_vport *vport;
2022 	struct lpfc_hba *phba;
2023 	int rc;
2024 
2025 	shost = job->shost;
2026 	if (!shost)
2027 		return -ENODEV;
2028 	vport = (struct lpfc_vport *)job->shost->hostdata;
2029 	if (!vport)
2030 		return -ENODEV;
2031 	phba = vport->phba;
2032 	if (!phba)
2033 		return -ENODEV;
2034 
2035 	if (phba->sli_rev < LPFC_SLI_REV4)
2036 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2037 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
2038 		 LPFC_SLI_INTF_IF_TYPE_2)
2039 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2040 	else
2041 		rc = -ENODEV;
2042 
2043 	return rc;
2044 }
2045 
2046 /**
2047  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2048  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2049  *
2050  * This function is responsible for checking and dispatching the bsg diag
2051  * mode end command from the user to the proper driver action routine.
2052  */
2053 static int
2054 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2055 {
2056 	struct Scsi_Host *shost;
2057 	struct lpfc_vport *vport;
2058 	struct lpfc_hba *phba;
2059 	struct diag_mode_set *loopback_mode_end_cmd;
2060 	uint32_t timeout;
2061 	int rc, i;
2062 
2063 	shost = job->shost;
2064 	if (!shost)
2065 		return -ENODEV;
2066 	vport = (struct lpfc_vport *)job->shost->hostdata;
2067 	if (!vport)
2068 		return -ENODEV;
2069 	phba = vport->phba;
2070 	if (!phba)
2071 		return -ENODEV;
2072 
2073 	if (phba->sli_rev < LPFC_SLI_REV4)
2074 		return -ENODEV;
2075 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2076 	    LPFC_SLI_INTF_IF_TYPE_2)
2077 		return -ENODEV;
2078 
2079 	/* clear loopback diagnostic mode */
2080 	spin_lock_irq(&phba->hbalock);
2081 	phba->link_flag &= ~LS_LOOPBACK_MODE;
2082 	spin_unlock_irq(&phba->hbalock);
2083 	loopback_mode_end_cmd = (struct diag_mode_set *)
2084 			job->request->rqst_data.h_vendor.vendor_cmd;
2085 	timeout = loopback_mode_end_cmd->timeout * 100;
2086 
2087 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2088 	if (rc) {
2089 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2090 				"3139 Failed to bring link to diagnostic "
2091 				"state, rc:x%x\n", rc);
2092 		goto loopback_mode_end_exit;
2093 	}
2094 
2095 	/* wait for link down before proceeding */
2096 	i = 0;
2097 	while (phba->link_state != LPFC_LINK_DOWN) {
2098 		if (i++ > timeout) {
2099 			rc = -ETIMEDOUT;
2100 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2101 					"3140 Timeout waiting for link to "
2102 					"diagnostic mode_end, timeout:%d ms\n",
2103 					timeout * 10);
2104 			/* there is nothing much we can do here */
2105 			break;
2106 		}
2107 		msleep(10);
2108 	}
2109 
2110 	/* reset port resource registrations */
2111 	rc = lpfc_selective_reset(phba);
2112 	phba->pport->fc_myDID = 0;
2113 
2114 loopback_mode_end_exit:
2115 	/* make return code available to userspace */
2116 	job->reply->result = rc;
2117 	/* complete the job back to userspace if no error */
2118 	if (rc == 0)
2119 		job->job_done(job);
2120 	return rc;
2121 }
2122 
2123 /**
2124  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2125  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2126  *
2127  * This function performs an SLI4 diag link test request from the user
2128  * application.
2129  */
2130 static int
2131 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2132 {
2133 	struct Scsi_Host *shost;
2134 	struct lpfc_vport *vport;
2135 	struct lpfc_hba *phba;
2136 	LPFC_MBOXQ_t *pmboxq;
2137 	struct sli4_link_diag *link_diag_test_cmd;
2138 	uint32_t req_len, alloc_len;
2139 	uint32_t timeout;
2140 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2141 	union lpfc_sli4_cfg_shdr *shdr;
2142 	uint32_t shdr_status, shdr_add_status;
2143 	struct diag_status *diag_status_reply;
2144 	int mbxstatus, rc = 0;
2145 
2146 	shost = job->shost;
2147 	if (!shost) {
2148 		rc = -ENODEV;
2149 		goto job_error;
2150 	}
2151 	vport = (struct lpfc_vport *)job->shost->hostdata;
2152 	if (!vport) {
2153 		rc = -ENODEV;
2154 		goto job_error;
2155 	}
2156 	phba = vport->phba;
2157 	if (!phba) {
2158 		rc = -ENODEV;
2159 		goto job_error;
2160 	}
2161 
2162 	if (phba->sli_rev < LPFC_SLI_REV4) {
2163 		rc = -ENODEV;
2164 		goto job_error;
2165 	}
2166 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2167 	    LPFC_SLI_INTF_IF_TYPE_2) {
2168 		rc = -ENODEV;
2169 		goto job_error;
2170 	}
2171 
2172 	if (job->request_len < sizeof(struct fc_bsg_request) +
2173 	    sizeof(struct sli4_link_diag)) {
2174 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2175 				"3013 Received LINK DIAG TEST request "
2176 				" size:%d below the minimum size:%d\n",
2177 				job->request_len,
2178 				(int)(sizeof(struct fc_bsg_request) +
2179 				sizeof(struct sli4_link_diag)));
2180 		rc = -EINVAL;
2181 		goto job_error;
2182 	}
2183 
2184 	rc = lpfc_bsg_diag_mode_enter(phba);
2185 	if (rc)
2186 		goto job_error;
2187 
2188 	link_diag_test_cmd = (struct sli4_link_diag *)
2189 			 job->request->rqst_data.h_vendor.vendor_cmd;
2190 	timeout = link_diag_test_cmd->timeout * 100;
2191 
2192 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2193 
2194 	if (rc)
2195 		goto job_error;
2196 
2197 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2198 	if (!pmboxq) {
2199 		rc = -ENOMEM;
2200 		goto link_diag_test_exit;
2201 	}
2202 
2203 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2204 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2205 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2206 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2207 				     req_len, LPFC_SLI4_MBX_EMBED);
2208 	if (alloc_len != req_len) {
2209 		rc = -ENOMEM;
2210 		goto link_diag_test_exit;
2211 	}
2212 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2213 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2214 	       phba->sli4_hba.lnk_info.lnk_no);
2215 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2216 	       phba->sli4_hba.lnk_info.lnk_tp);
2217 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2218 	       link_diag_test_cmd->test_id);
2219 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2220 	       link_diag_test_cmd->loops);
2221 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2222 	       link_diag_test_cmd->test_version);
2223 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2224 	       link_diag_test_cmd->error_action);
2225 
2226 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2227 
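	/* decode the generic sli4 config header from the completed mailbox
	 * so the shdr status and add_status can be reported to the app
	 */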
2228 	shdr = (union lpfc_sli4_cfg_shdr *)
2229 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2230 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2231 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2232 	if (shdr_status || shdr_add_status || mbxstatus) {
2233 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2234 				"3010 Run link diag test mailbox failed with "
2235 				"mbx_status x%x status x%x, add_status x%x\n",
2236 				mbxstatus, shdr_status, shdr_add_status);
2237 	}
2238 
2239 	diag_status_reply = (struct diag_status *)
2240 			    job->reply->reply_data.vendor_reply.vendor_rsp;
2241 
2242 	if (job->reply_len <
2243 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2244 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2245 				"3012 Received Run link diag test reply "
2246 				"below minimum size (%d): reply_len:%d\n",
2247 				(int)(sizeof(struct fc_bsg_request) +
2248 				sizeof(struct diag_status)),
2249 				job->reply_len);
2250 		rc = -EINVAL;
2251 		goto job_error;
2252 	}
2253 
2254 	diag_status_reply->mbox_status = mbxstatus;
2255 	diag_status_reply->shdr_status = shdr_status;
2256 	diag_status_reply->shdr_add_status = shdr_add_status;
2257 
2258 link_diag_test_exit:
2259 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2260 
2261 	if (pmboxq)
2262 		mempool_free(pmboxq, phba->mbox_mem_pool);
2263 
2264 	lpfc_bsg_diag_mode_exit(phba);
2265 
2266 job_error:
2267 	/* make error code available to userspace */
2268 	job->reply->result = rc;
2269 	/* complete the job back to userspace if no error */
2270 	if (rc == 0)
2271 		job->job_done(job);
2272 	return rc;
2273 }
2274 
2275 /**
2276  * lpfcdiag_loop_self_reg - obtains a remote port login id
2277  * @phba: Pointer to HBA context object
2278  * @rpi: Pointer to a remote port login id
2279  *
2280  * This function obtains a remote port login id so the diag loopback test
2281  * can send and receive its own unsolicited CT command.
2282  **/
2283 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2284 {
2285 	LPFC_MBOXQ_t *mbox;
2286 	struct lpfc_dmabuf *dmabuff;
2287 	int status;
2288 
2289 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2290 	if (!mbox)
2291 		return -ENOMEM;
2292 
2293 	if (phba->sli_rev < LPFC_SLI_REV4)
2294 		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2295 				(uint8_t *)&phba->pport->fc_sparam,
2296 				mbox, *rpi);
2297 	else {
2298 		*rpi = lpfc_sli4_alloc_rpi(phba);
2299 		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2300 				phba->pport->fc_myDID,
2301 				(uint8_t *)&phba->pport->fc_sparam,
2302 				mbox, *rpi);
2303 	}
2304 
2305 	if (status) {
2306 		mempool_free(mbox, phba->mbox_mem_pool);
2307 		if (phba->sli_rev == LPFC_SLI_REV4)
2308 			lpfc_sli4_free_rpi(phba, *rpi);
2309 		return -ENOMEM;
2310 	}
2311 
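	/* detach the service params dma buffer from the mailbox so it can
	 * be freed here regardless of how the command completes
	 */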
2312 	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2313 	mbox->context1 = NULL;
2314 	mbox->context2 = NULL;
2315 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2316 
2317 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2318 		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2319 		kfree(dmabuff);
2320 		if (status != MBX_TIMEOUT)
2321 			mempool_free(mbox, phba->mbox_mem_pool);
2322 		if (phba->sli_rev == LPFC_SLI_REV4)
2323 			lpfc_sli4_free_rpi(phba, *rpi);
2324 		return -ENODEV;
2325 	}
2326 
2327 	if (phba->sli_rev < LPFC_SLI_REV4)
2328 		*rpi = mbox->u.mb.un.varWords[0];
2329 
2330 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2331 	kfree(dmabuff);
2332 	mempool_free(mbox, phba->mbox_mem_pool);
2333 	return 0;
2334 }
2335 
2336 /**
2337  * lpfcdiag_loop_self_unreg - unregister the rpi
2338  * @phba: Pointer to HBA context object
2339  * @rpi: Remote port login id
2340  *
2341  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2342  **/
2343 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2344 {
2345 	LPFC_MBOXQ_t *mbox;
2346 	int status;
2347 
2348 	/* Allocate mboxq structure */
2349 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2350 	if (mbox == NULL)
2351 		return -ENOMEM;
2352 
2353 	if (phba->sli_rev < LPFC_SLI_REV4)
2354 		lpfc_unreg_login(phba, 0, rpi, mbox);
2355 	else
2356 		lpfc_unreg_login(phba, phba->pport->vpi,
2357 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2358 
2359 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2360 
2361 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2362 		if (status != MBX_TIMEOUT)
2363 			mempool_free(mbox, phba->mbox_mem_pool);
2364 		return -EIO;
2365 	}
2366 	mempool_free(mbox, phba->mbox_mem_pool);
2367 	if (phba->sli_rev == LPFC_SLI_REV4)
2368 		lpfc_sli4_free_rpi(phba, rpi);
2369 	return 0;
2370 }
2371 
2372 /**
2373  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2374  * @phba: Pointer to HBA context object
2375  * @rpi: Remote port login id
2376  * @txxri: Pointer to transmit exchange id
2377  * @rxxri: Pointer to receive exchange id
2378  *
2379  * This function obtains the transmit and receive ids required to send
2380  * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
2381  * flags are used so the unsolicited response handler is able to process
2382  * the ct command sent on the same port.
2383  **/
2384 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2385 			 uint16_t *txxri, uint16_t *rxxri)
2386 {
2387 	struct lpfc_bsg_event *evt;
2388 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2389 	IOCB_t *cmd, *rsp;
2390 	struct lpfc_dmabuf *dmabuf;
2391 	struct ulp_bde64 *bpl = NULL;
2392 	struct lpfc_sli_ct_request *ctreq = NULL;
2393 	int ret_val = 0;
2394 	int time_left;
2395 	int iocb_stat = 0;
2396 	unsigned long flags;
2397 
2398 	*txxri = 0;
2399 	*rxxri = 0;
2400 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2401 				SLI_CT_ELX_LOOPBACK);
2402 	if (!evt)
2403 		return -ENOMEM;
2404 
2405 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2406 	list_add(&evt->node, &phba->ct_ev_waiters);
2407 	lpfc_bsg_event_ref(evt);
2408 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2409 
2410 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2411 	rspiocbq = lpfc_sli_get_iocbq(phba);
2412 
2413 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2414 	if (dmabuf) {
2415 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2416 		if (dmabuf->virt) {
2417 			INIT_LIST_HEAD(&dmabuf->list);
2418 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2419 			memset(bpl, 0, sizeof(*bpl));
2420 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
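			/* the CT request is carved out of the same mbuf,
			 * immediately following the single BDE entry
			 */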
2421 			bpl->addrHigh =
2422 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2423 					sizeof(*bpl)));
2424 			bpl->addrLow =
2425 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2426 					sizeof(*bpl)));
2427 			bpl->tus.f.bdeFlags = 0;
2428 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2429 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2430 		}
2431 	}
2432 
2433 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2434 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2435 	    dmabuf->virt == NULL) {
2436 		ret_val = -ENOMEM;
2437 		goto err_get_xri_exit;
2438 	}
2439 
2440 	cmd = &cmdiocbq->iocb;
2441 	rsp = &rspiocbq->iocb;
2442 
2443 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2444 
2445 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2446 	ctreq->RevisionId.bits.InId = 0;
2447 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2448 	ctreq->FsSubType = 0;
2449 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2450 	ctreq->CommandResponse.bits.Size = 0;
2451 
2452 
2453 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2454 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2455 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2456 	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2457 
2458 	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2459 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2460 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2461 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2462 
2463 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2464 	cmd->ulpBdeCount = 1;
2465 	cmd->ulpLe = 1;
2466 	cmd->ulpClass = CLASS3;
2467 	cmd->ulpContext = rpi;
2468 
2469 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2470 	cmdiocbq->vport = phba->pport;
2471 
2472 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2473 				rspiocbq,
2474 				(phba->fc_ratov * 2)
2475 				+ LPFC_DRVR_TIMEOUT);
2476 	if (iocb_stat) {
2477 		ret_val = -EIO;
2478 		goto err_get_xri_exit;
2479 	}
2480 	*txxri =  rsp->ulpContext;
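	/* the response ulpContext carries the transmit xri; the receive xri
	 * is delivered by the unsolicited event handled below
	 */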
2481 
2482 	evt->waiting = 1;
2483 	evt->wait_time_stamp = jiffies;
2484 	time_left = wait_event_interruptible_timeout(
2485 		evt->wq, !list_empty(&evt->events_to_see),
2486 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2487 	if (list_empty(&evt->events_to_see))
2488 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2489 	else {
2490 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2491 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2492 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2493 		*rxxri = (list_entry(evt->events_to_get.prev,
2494 				     typeof(struct event_data),
2495 				     node))->immed_dat;
2496 	}
2497 	evt->waiting = 0;
2498 
2499 err_get_xri_exit:
2500 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2501 	lpfc_bsg_event_unref(evt); /* release ref */
2502 	lpfc_bsg_event_unref(evt); /* delete */
2503 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2504 
2505 	if (dmabuf) {
2506 		if (dmabuf->virt)
2507 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2508 		kfree(dmabuf);
2509 	}
2510 
2511 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2512 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2513 	if (rspiocbq)
2514 		lpfc_sli_release_iocbq(phba, rspiocbq);
2515 	return ret_val;
2516 }
2517 
2518 /**
2519  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2520  * @phba: Pointer to HBA context object
2521  *
2522  * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
2523  * returns a pointer to the buffer.
2524  **/
2525 static struct lpfc_dmabuf *
2526 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2527 {
2528 	struct lpfc_dmabuf *dmabuf;
2529 	struct pci_dev *pcidev = phba->pcidev;
2530 
2531 	/* allocate dma buffer struct */
2532 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2533 	if (!dmabuf)
2534 		return NULL;
2535 
2536 	INIT_LIST_HEAD(&dmabuf->list);
2537 
2538 	/* now, allocate dma buffer */
2539 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2540 					  &(dmabuf->phys), GFP_KERNEL);
2541 
2542 	if (!dmabuf->virt) {
2543 		kfree(dmabuf);
2544 		return NULL;
2545 	}
2546 	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2547 
2548 	return dmabuf;
2549 }
2550 
2551 /**
2552  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2553  * @phba: Pointer to HBA context object.
2554  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2555  *
2556  * This routine frees a dma buffer and its associated buffer
2557  * descriptor referred to by @dmabuf.
2558  **/
2559 static void
2560 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2561 {
2562 	struct pci_dev *pcidev = phba->pcidev;
2563 
2564 	if (!dmabuf)
2565 		return;
2566 
2567 	if (dmabuf->virt)
2568 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2569 				  dmabuf->virt, dmabuf->phys);
2570 	kfree(dmabuf);
2571 	return;
2572 }
2573 
2574 /**
2575  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2576  * @phba: Pointer to HBA context object.
2577  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2578  *
2579  * This routine frees all dma buffers and their associated buffer
2580  * descriptors referred to by @dmabuf_list.
2581  **/
2582 static void
2583 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2584 			    struct list_head *dmabuf_list)
2585 {
2586 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2587 
2588 	if (list_empty(dmabuf_list))
2589 		return;
2590 
2591 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2592 		list_del_init(&dmabuf->list);
2593 		lpfc_bsg_dma_page_free(phba, dmabuf);
2594 	}
2595 	return;
2596 }
2597 
2598 /**
2599  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2600  * @phba: Pointer to HBA context object
2601  * @bpl: Pointer to 64 bit bde structure
2602  * @size: Number of bytes to process
2603  * @nocopydata: Flag indicating that user data will not be copied into the buffers
2604  *
2605  * This function allocates page size buffers, chains them into an
2606  * lpfc_dmabufext list, and fills in @bpl with one bde per buffer. The
2607  * buffers are zeroed for receive unless @nocopydata is set.
2608  **/
2609 static struct lpfc_dmabufext *
2610 diag_cmd_data_alloc(struct lpfc_hba *phba,
2611 		   struct ulp_bde64 *bpl, uint32_t size,
2612 		   int nocopydata)
2613 {
2614 	struct lpfc_dmabufext *mlist = NULL;
2615 	struct lpfc_dmabufext *dmp;
2616 	int cnt, offset = 0, i = 0;
2617 	struct pci_dev *pcidev;
2618 
2619 	pcidev = phba->pcidev;
2620 
2621 	while (size) {
2622 		/* We get chunks of 4K */
2623 		if (size > BUF_SZ_4K)
2624 			cnt = BUF_SZ_4K;
2625 		else
2626 			cnt = size;
2627 
2628 		/* allocate struct lpfc_dmabufext buffer header */
2629 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2630 		if (!dmp)
2631 			goto out;
2632 
2633 		INIT_LIST_HEAD(&dmp->dma.list);
2634 
2635 		/* Queue it to a linked list */
2636 		if (mlist)
2637 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2638 		else
2639 			mlist = dmp;
2640 
2641 		/* allocate buffer */
2642 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2643 						   cnt,
2644 						   &(dmp->dma.phys),
2645 						   GFP_KERNEL);
2646 
2647 		if (!dmp->dma.virt)
2648 			goto out;
2649 
2650 		dmp->size = cnt;
2651 
2652 		if (nocopydata) {
2653 			bpl->tus.f.bdeFlags = 0;
2654 			pci_dma_sync_single_for_device(phba->pcidev,
2655 				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2656 
2657 		} else {
2658 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2659 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2660 		}
2661 
2662 		/* build buffer ptr list for IOCB */
2663 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2664 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2665 		bpl->tus.f.bdeSize = (ushort) cnt;
2666 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2667 		bpl++;
2668 
2669 		i++;
2670 		offset += cnt;
2671 		size -= cnt;
2672 	}
2673 
2674 	mlist->flag = i;
2675 	return mlist;
2676 out:
2677 	diag_cmd_data_free(phba, mlist);
2678 	return NULL;
2679 }
2680 
2681 /**
2682  * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2683  * @phba: Pointer to HBA context object
2684  * @rxxri: Receive exchange id
2685  * @len: Number of data bytes
2686  *
2687  * This function allocates and posts data buffers of sufficient size to
2688  * receive an unsolicited CT command.
2689  **/
2690 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2691 			     size_t len)
2692 {
2693 	struct lpfc_sli *psli = &phba->sli;
2694 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2695 	struct lpfc_iocbq *cmdiocbq;
2696 	IOCB_t *cmd = NULL;
2697 	struct list_head head, *curr, *next;
2698 	struct lpfc_dmabuf *rxbmp;
2699 	struct lpfc_dmabuf *dmp;
2700 	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2701 	struct ulp_bde64 *rxbpl = NULL;
2702 	uint32_t num_bde;
2703 	struct lpfc_dmabufext *rxbuffer = NULL;
2704 	int ret_val = 0;
2705 	int iocb_stat;
2706 	int i = 0;
2707 
2708 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2709 	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2710 	if (rxbmp != NULL) {
2711 		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2712 		if (rxbmp->virt) {
2713 			INIT_LIST_HEAD(&rxbmp->list);
2714 			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2715 			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2716 		}
2717 	}
2718 
2719 	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2720 		ret_val = -ENOMEM;
2721 		goto err_post_rxbufs_exit;
2722 	}
2723 
2724 	/* Queue buffers for the receive exchange */
2725 	num_bde = (uint32_t)rxbuffer->flag;
2726 	dmp = &rxbuffer->dma;
2727 
2728 	cmd = &cmdiocbq->iocb;
2729 	i = 0;
2730 
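	/* temporarily splice a local list head into the dma buffer chain so
	 * the buffers can be walked and consumed one or two at a time
	 */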
2731 	INIT_LIST_HEAD(&head);
2732 	list_add_tail(&head, &dmp->list);
2733 	list_for_each_safe(curr, next, &head) {
2734 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2735 		list_del(curr);
2736 
2737 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2738 			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2739 			cmd->un.quexri64cx.buff.bde.addrHigh =
2740 				putPaddrHigh(mp[i]->phys);
2741 			cmd->un.quexri64cx.buff.bde.addrLow =
2742 				putPaddrLow(mp[i]->phys);
2743 			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2744 				((struct lpfc_dmabufext *)mp[i])->size;
2745 			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2746 			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2747 			cmd->ulpPU = 0;
2748 			cmd->ulpLe = 1;
2749 			cmd->ulpBdeCount = 1;
2750 			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2751 
2752 		} else {
2753 			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2754 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2755 			cmd->un.cont64[i].tus.f.bdeSize =
2756 				((struct lpfc_dmabufext *)mp[i])->size;
2757 			cmd->ulpBdeCount = ++i;
2758 
2759 			if ((--num_bde > 0) && (i < 2))
2760 				continue;
2761 
2762 			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2763 			cmd->ulpLe = 1;
2764 		}
2765 
2766 		cmd->ulpClass = CLASS3;
2767 		cmd->ulpContext = rxxri;
2768 
2769 		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2770 						0);
2771 		if (iocb_stat == IOCB_ERROR) {
2772 			diag_cmd_data_free(phba,
2773 				(struct lpfc_dmabufext *)mp[0]);
2774 			if (mp[1])
2775 				diag_cmd_data_free(phba,
2776 					  (struct lpfc_dmabufext *)mp[1]);
2777 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2778 			ret_val = -EIO;
2779 			goto err_post_rxbufs_exit;
2780 		}
2781 
2782 		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2783 		if (mp[1]) {
2784 			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2785 			mp[1] = NULL;
2786 		}
2787 
2788 		/* The iocb was freed by lpfc_sli_issue_iocb */
2789 		cmdiocbq = lpfc_sli_get_iocbq(phba);
2790 		if (!cmdiocbq) {
2791 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2792 			ret_val = -EIO;
2793 			goto err_post_rxbufs_exit;
2794 		}
2795 
2796 		cmd = &cmdiocbq->iocb;
2797 		i = 0;
2798 	}
2799 	list_del(&head);
2800 
2801 err_post_rxbufs_exit:
2802 
2803 	if (rxbmp) {
2804 		if (rxbmp->virt)
2805 			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2806 		kfree(rxbmp);
2807 	}
2808 
2809 	if (cmdiocbq)
2810 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2811 	return ret_val;
2812 }
2813 
2814 /**
2815  * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
2816  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2817  *
2818  * This function receives a user data buffer to be transmitted and received
2819  * on the same port; the link must be up and in loopback mode prior
2820  * to this call.
2821  * 1. A kernel buffer is allocated to copy the user data into.
2822  * 2. The port registers with "itself".
2823  * 3. The transmit and receive exchange ids are obtained.
2824  * 4. The receive exchange id is posted.
2825  * 5. A new els loopback event is created.
2826  * 6. The command and response iocbs are allocated.
2827  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2828  *
2829  * This function is meant to be called n times while the port is in loopback
2830  * so it is the app's responsibility to issue a reset to take the port out
2831  * of loopback mode.
2832  **/
2833 static int
2834 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2835 {
2836 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2837 	struct lpfc_hba *phba = vport->phba;
2838 	struct diag_mode_test *diag_mode;
2839 	struct lpfc_bsg_event *evt;
2840 	struct event_data *evdat;
2841 	struct lpfc_sli *psli = &phba->sli;
2842 	uint32_t size;
2843 	uint32_t full_size;
2844 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2845 	uint16_t rpi = 0;
2846 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2847 	IOCB_t *cmd, *rsp = NULL;
2848 	struct lpfc_sli_ct_request *ctreq;
2849 	struct lpfc_dmabuf *txbmp;
2850 	struct ulp_bde64 *txbpl = NULL;
2851 	struct lpfc_dmabufext *txbuffer = NULL;
2852 	struct list_head head;
2853 	struct lpfc_dmabuf  *curr;
2854 	uint16_t txxri = 0, rxxri;
2855 	uint32_t num_bde;
2856 	uint8_t *ptr = NULL, *rx_databuf = NULL;
2857 	int rc = 0;
2858 	int time_left;
2859 	int iocb_stat;
2860 	unsigned long flags;
2861 	void *dataout = NULL;
2862 	uint32_t total_mem;
2863 
2864 	/* in case no data is returned, return just the return code */
2865 	job->reply->reply_payload_rcv_len = 0;
2866 
2867 	if (job->request_len <
2868 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2869 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2870 				"2739 Received DIAG TEST request below minimum "
2871 				"size\n");
2872 		rc = -EINVAL;
2873 		goto loopback_test_exit;
2874 	}
2875 
2876 	if (job->request_payload.payload_len !=
2877 		job->reply_payload.payload_len) {
2878 		rc = -EINVAL;
2879 		goto loopback_test_exit;
2880 	}
2881 	diag_mode = (struct diag_mode_test *)
2882 		job->request->rqst_data.h_vendor.vendor_cmd;
2883 
2884 	if ((phba->link_state == LPFC_HBA_ERROR) ||
2885 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2886 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2887 		rc = -EACCES;
2888 		goto loopback_test_exit;
2889 	}
2890 
2891 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2892 		rc = -EACCES;
2893 		goto loopback_test_exit;
2894 	}
2895 
2896 	size = job->request_payload.payload_len;
2897 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2898 
2899 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2900 		rc = -ERANGE;
2901 		goto loopback_test_exit;
2902 	}
2903 
2904 	if (full_size >= BUF_SZ_4K) {
2905 		/*
2906 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2907 		 * then we allocate 64k and re-use that buffer over and over to
2908 		 * xfer the whole block. This is because Linux kernel has a
2909 		 * problem allocating more than 120k of kernel space memory. Saw
2910 		 * problem with GET_FCPTARGETMAPPING...
2911 		 */
2912 		if (size <= (64 * 1024))
2913 			total_mem = full_size;
2914 		else
2915 			total_mem = 64 * 1024;
2916 	} else
2917 		/* Allocate memory for ioctl data */
2918 		total_mem = BUF_SZ_4K;
2919 
2920 	dataout = kmalloc(total_mem, GFP_KERNEL);
2921 	if (dataout == NULL) {
2922 		rc = -ENOMEM;
2923 		goto loopback_test_exit;
2924 	}
2925 
2926 	ptr = dataout;
2927 	ptr += ELX_LOOPBACK_HEADER_SZ;
2928 	sg_copy_to_buffer(job->request_payload.sg_list,
2929 				job->request_payload.sg_cnt,
2930 				ptr, size);
2931 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
2932 	if (rc)
2933 		goto loopback_test_exit;
2934 
2935 	if (phba->sli_rev < LPFC_SLI_REV4) {
2936 		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2937 		if (rc) {
2938 			lpfcdiag_loop_self_unreg(phba, rpi);
2939 			goto loopback_test_exit;
2940 		}
2941 
2942 		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2943 		if (rc) {
2944 			lpfcdiag_loop_self_unreg(phba, rpi);
2945 			goto loopback_test_exit;
2946 		}
2947 	}
2948 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2949 				SLI_CT_ELX_LOOPBACK);
2950 	if (!evt) {
2951 		lpfcdiag_loop_self_unreg(phba, rpi);
2952 		rc = -ENOMEM;
2953 		goto loopback_test_exit;
2954 	}
2955 
2956 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2957 	list_add(&evt->node, &phba->ct_ev_waiters);
2958 	lpfc_bsg_event_ref(evt);
2959 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2960 
2961 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2962 	if (phba->sli_rev < LPFC_SLI_REV4)
2963 		rspiocbq = lpfc_sli_get_iocbq(phba);
2964 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2965 
2966 	if (txbmp) {
2967 		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2968 		if (txbmp->virt) {
2969 			INIT_LIST_HEAD(&txbmp->list);
2970 			txbpl = (struct ulp_bde64 *) txbmp->virt;
2971 			txbuffer = diag_cmd_data_alloc(phba,
2972 							txbpl, full_size, 0);
2973 		}
2974 	}
2975 
2976 	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
2977 		rc = -ENOMEM;
2978 		goto err_loopback_test_exit;
2979 	}
2980 	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
2981 		rc = -ENOMEM;
2982 		goto err_loopback_test_exit;
2983 	}
2984 
2985 	cmd = &cmdiocbq->iocb;
2986 	if (phba->sli_rev < LPFC_SLI_REV4)
2987 		rsp = &rspiocbq->iocb;
2988 
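	/* walk the chained tx buffers: the first segment carries the ELX
	 * loopback CT header, then the user payload is copied in across
	 * the remaining space segment by segment
	 */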
2989 	INIT_LIST_HEAD(&head);
2990 	list_add_tail(&head, &txbuffer->dma.list);
2991 	list_for_each_entry(curr, &head, list) {
2992 		segment_len = ((struct lpfc_dmabufext *)curr)->size;
2993 		if (current_offset == 0) {
2994 			ctreq = curr->virt;
2995 			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2996 			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2997 			ctreq->RevisionId.bits.InId = 0;
2998 			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2999 			ctreq->FsSubType = 0;
3000 			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3001 			ctreq->CommandResponse.bits.Size   = size;
3002 			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3003 		} else
3004 			segment_offset = 0;
3005 
3006 		BUG_ON(segment_offset >= segment_len);
3007 		memcpy(curr->virt + segment_offset,
3008 			ptr + current_offset,
3009 			segment_len - segment_offset);
3010 
3011 		current_offset += segment_len - segment_offset;
3012 		BUG_ON(current_offset > size);
3013 	}
3014 	list_del(&head);
3015 
3016 	/* Build the XMIT_SEQUENCE iocb */
3017 	num_bde = (uint32_t)txbuffer->flag;
3018 
3019 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3020 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3021 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3022 	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3023 
3024 	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3025 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3026 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3027 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3028 
3029 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3030 	cmd->ulpBdeCount = 1;
3031 	cmd->ulpLe = 1;
3032 	cmd->ulpClass = CLASS3;
3033 
3034 	if (phba->sli_rev < LPFC_SLI_REV4) {
3035 		cmd->ulpContext = txxri;
3036 	} else {
3037 		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3038 		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3039 		cmdiocbq->context3 = txbmp;
3040 		cmdiocbq->sli4_xritag = NO_XRI;
3041 		cmd->unsli3.rcvsli3.ox_id = 0xffff;
3042 	}
3043 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3044 	cmdiocbq->vport = phba->pport;
3045 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3046 					     rspiocbq, (phba->fc_ratov * 2) +
3047 					     LPFC_DRVR_TIMEOUT);
3048 
3049 	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
3050 					   (rsp->ulpStatus != IOCB_SUCCESS))) {
3051 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3052 				"3126 Failed loopback test issue iocb: "
3053 				"iocb_stat:x%x\n", iocb_stat);
3054 		rc = -EIO;
3055 		goto err_loopback_test_exit;
3056 	}
3057 
3058 	evt->waiting = 1;
3059 	time_left = wait_event_interruptible_timeout(
3060 		evt->wq, !list_empty(&evt->events_to_see),
3061 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
3062 	evt->waiting = 0;
3063 	if (list_empty(&evt->events_to_see)) {
3064 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3065 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3066 				"3125 Not receiving unsolicited event, "
3067 				"rc:x%x\n", rc);
3068 	} else {
3069 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3070 		list_move(evt->events_to_see.prev, &evt->events_to_get);
3071 		evdat = list_entry(evt->events_to_get.prev,
3072 				   typeof(*evdat), node);
3073 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3074 		rx_databuf = evdat->data;
3075 		if (evdat->len != full_size) {
3076 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3077 				"1603 Loopback test did not receive expected "
3078 				"data length. actual length 0x%x expected "
3079 				"length 0x%x\n",
3080 				evdat->len, full_size);
3081 			rc = -EIO;
3082 		} else if (rx_databuf == NULL)
3083 			rc = -EIO;
3084 		else {
3085 			rc = IOCB_SUCCESS;
3086 			/* skip over elx loopback header */
3087 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3088 			job->reply->reply_payload_rcv_len =
3089 				sg_copy_from_buffer(job->reply_payload.sg_list,
3090 						    job->reply_payload.sg_cnt,
3091 						    rx_databuf, size);
3092 			job->reply->reply_payload_rcv_len = size;
3093 		}
3094 	}
3095 
3096 err_loopback_test_exit:
3097 	lpfcdiag_loop_self_unreg(phba, rpi);
3098 
3099 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3100 	lpfc_bsg_event_unref(evt); /* release ref */
3101 	lpfc_bsg_event_unref(evt); /* delete */
3102 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3103 
3104 	if (cmdiocbq != NULL)
3105 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3106 
3107 	if (rspiocbq != NULL)
3108 		lpfc_sli_release_iocbq(phba, rspiocbq);
3109 
3110 	if (txbmp != NULL) {
3111 		if (txbpl != NULL) {
3112 			if (txbuffer != NULL)
3113 				diag_cmd_data_free(phba, txbuffer);
3114 			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3115 		}
3116 		kfree(txbmp);
3117 	}
3118 
3119 loopback_test_exit:
3120 	kfree(dataout);
3121 	/* make error code available to userspace */
3122 	job->reply->result = rc;
3123 	job->dd_data = NULL;
3124 	/* complete the job back to userspace if no error */
3125 	if (rc == IOCB_SUCCESS)
3126 		job->job_done(job);
3127 	return rc;
3128 }
3129 
3130 /**
3131  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3132  * @job: GET_DFC_REV fc_bsg_job
3133  **/
3134 static int
3135 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3136 {
3137 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3138 	struct lpfc_hba *phba = vport->phba;
3139 	struct get_mgmt_rev *event_req;
3140 	struct get_mgmt_rev_reply *event_reply;
3141 	int rc = 0;
3142 
3143 	if (job->request_len <
3144 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3145 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3146 				"2740 Received GET_DFC_REV request below "
3147 				"minimum size\n");
3148 		rc = -EINVAL;
3149 		goto job_error;
3150 	}
3151 
3152 	event_req = (struct get_mgmt_rev *)
3153 		job->request->rqst_data.h_vendor.vendor_cmd;
3154 
3155 	event_reply = (struct get_mgmt_rev_reply *)
3156 		job->reply->reply_data.vendor_reply.vendor_rsp;
3157 
3158 	if (job->reply_len <
3159 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3160 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3161 				"2741 Received GET_DFC_REV reply below "
3162 				"minimum size\n");
3163 		rc = -EINVAL;
3164 		goto job_error;
3165 	}
3166 
3167 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3168 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3169 job_error:
3170 	job->reply->result = rc;
3171 	if (rc == 0)
3172 		job->job_done(job);
3173 	return rc;
3174 }
3175 
3176 /**
3177  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3178  * @phba: Pointer to HBA context object.
3179  * @pmboxq: Pointer to mailbox command.
3180  *
3181  * This is the completion handler function for mailbox commands issued from
3182  * the lpfc_bsg_issue_mbox function. This function is called by the
3183  * mailbox event handler function with no lock held. This function
3184  * will wake up thread waiting on the wait queue pointed by context1
3185  * of the mailbox.
3186  **/
3187 void
3188 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3189 {
3190 	struct bsg_job_data *dd_data;
3191 	struct fc_bsg_job *job;
3192 	uint32_t size;
3193 	unsigned long flags;
3194 	uint8_t *pmb, *pmb_buf;
3195 
3196 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3197 	dd_data = pmboxq->context1;
3198 	/* job already timed out? */
3199 	if (!dd_data) {
3200 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3201 		return;
3202 	}
3203 
3204 	/*
3205 	 * The outgoing buffer is readily referenced from the dma buffer;
3206 	 * we just need to get the header part from the mailboxq structure.
3207 	 */
3208 	pmb = (uint8_t *)&pmboxq->u.mb;
3209 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3210 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3211 
3212 	job = dd_data->context_un.mbox.set_job;
3213 	if (job) {
3214 		size = job->reply_payload.payload_len;
3215 		job->reply->reply_payload_rcv_len =
3216 			sg_copy_from_buffer(job->reply_payload.sg_list,
3217 					    job->reply_payload.sg_cnt,
3218 					    pmb_buf, size);
3219 		/* need to hold the lock until we set job->dd_data to NULL
3220 		 * to hold off the timeout handler returning to the mid-layer
3221 		 * while we are still processing the job.
3222 		 */
3223 		job->dd_data = NULL;
3224 		dd_data->context_un.mbox.set_job = NULL;
3225 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3226 	} else {
3227 		dd_data->context_un.mbox.set_job = NULL;
3228 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3229 	}
3230 
3231 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3232 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3233 	kfree(dd_data);
3234 
3235 	if (job) {
3236 		job->reply->result = 0;
3237 		job->job_done(job);
3238 	}
3239 	return;
3240 }
3241 
3242 /**
3243  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3244  * @phba: Pointer to HBA context object.
3245  * @mb: Pointer to a mailbox object.
3246  * @vport: Pointer to a vport object.
3247  *
3248  * Some commands require the port to be offline, some may not be called from
3249  * the application.
3250  **/
3251 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3252 	MAILBOX_t *mb, struct lpfc_vport *vport)
3253 {
3254 	/* return negative error values for bsg job */
3255 	switch (mb->mbxCommand) {
3256 	/* Offline only */
3257 	case MBX_INIT_LINK:
3258 	case MBX_DOWN_LINK:
3259 	case MBX_CONFIG_LINK:
3260 	case MBX_CONFIG_RING:
3261 	case MBX_RESET_RING:
3262 	case MBX_UNREG_LOGIN:
3263 	case MBX_CLEAR_LA:
3264 	case MBX_DUMP_CONTEXT:
3265 	case MBX_RUN_DIAGS:
3266 	case MBX_RESTART:
3267 	case MBX_SET_MASK:
3268 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3269 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3270 				"2743 Command 0x%x is illegal in on-line "
3271 				"state\n",
3272 				mb->mbxCommand);
3273 			return -EPERM;
3274 		}
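		/* fall through - the commands below are also allowed on-line */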
3275 	case MBX_WRITE_NV:
3276 	case MBX_WRITE_VPARMS:
3277 	case MBX_LOAD_SM:
3278 	case MBX_READ_NV:
3279 	case MBX_READ_CONFIG:
3280 	case MBX_READ_RCONFIG:
3281 	case MBX_READ_STATUS:
3282 	case MBX_READ_XRI:
3283 	case MBX_READ_REV:
3284 	case MBX_READ_LNK_STAT:
3285 	case MBX_DUMP_MEMORY:
3286 	case MBX_DOWN_LOAD:
3287 	case MBX_UPDATE_CFG:
3288 	case MBX_KILL_BOARD:
3289 	case MBX_LOAD_AREA:
3290 	case MBX_LOAD_EXP_ROM:
3291 	case MBX_BEACON:
3292 	case MBX_DEL_LD_ENTRY:
3293 	case MBX_SET_DEBUG:
3294 	case MBX_WRITE_WWN:
3295 	case MBX_SLI4_CONFIG:
3296 	case MBX_READ_EVENT_LOG:
3297 	case MBX_READ_EVENT_LOG_STATUS:
3298 	case MBX_WRITE_EVENT_LOG:
3299 	case MBX_PORT_CAPABILITIES:
3300 	case MBX_PORT_IOV_CONTROL:
3301 	case MBX_RUN_BIU_DIAG64:
3302 		break;
3303 	case MBX_SET_VARIABLE:
3304 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3305 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3306 			mb->un.varWords[0],
3307 			mb->un.varWords[1]);
3308 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3309 			&& (mb->un.varWords[1] == 1)) {
3310 			phba->wait_4_mlo_maint_flg = 1;
3311 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3312 			spin_lock_irq(&phba->hbalock);
3313 			phba->link_flag &= ~LS_LOOPBACK_MODE;
3314 			spin_unlock_irq(&phba->hbalock);
3315 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3316 		}
3317 		break;
3318 	case MBX_READ_SPARM64:
3319 	case MBX_READ_TOPOLOGY:
3320 	case MBX_REG_LOGIN:
3321 	case MBX_REG_LOGIN64:
3322 	case MBX_CONFIG_PORT:
3323 	case MBX_RUN_BIU_DIAG:
3324 	default:
3325 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3326 			"2742 Unknown Command 0x%x\n",
3327 			mb->mbxCommand);
3328 		return -EPERM;
3329 	}
3330 
3331 	return 0; /* ok */
3332 }
3333 
3334 /**
3335  * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3336  * @phba: Pointer to HBA context object.
3337  *
3338  * This routine cleans up and resets the BSG handling of a multi-buffer
3339  * mbox command session.
3340  **/
3341 static void
3342 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3343 {
3344 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3345 		return;
3346 
3347 	/* free all memory, including dma buffers */
3348 	lpfc_bsg_dma_page_list_free(phba,
3349 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3350 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3351 	/* multi-buffer mailbox command pass-through session complete */
3352 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3353 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3354 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3355 
3356 	return;
3357 }
3358 
3359 /**
3360  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3361  * @phba: Pointer to HBA context object.
3362  * @pmboxq: Pointer to mailbox command.
3363  *
3364  * This routine handles the BSG job for mailbox command completions with
3365  * multiple external buffers.
3366  **/
3367 static struct fc_bsg_job *
3368 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3369 {
3370 	struct bsg_job_data *dd_data;
3371 	struct fc_bsg_job *job;
3372 	uint8_t *pmb, *pmb_buf;
3373 	unsigned long flags;
3374 	uint32_t size;
3375 	int rc = 0;
3376 	struct lpfc_dmabuf *dmabuf;
3377 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3378 	uint8_t *pmbx;
3379 
3380 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3381 	dd_data = pmboxq->context1;
3382 	/* has the job already timed out? */
3383 	if (!dd_data) {
3384 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3385 		job = NULL;
3386 		goto job_done_out;
3387 	}
3388 
3389 	/*
3390 	 * The outgoing buffer is readily referenced from the dma buffer;
3391 	 * we just need to get the header part from the mailboxq structure.
3392 	 */
3393 	pmb = (uint8_t *)&pmboxq->u.mb;
3394 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3395 	/* Copy the byte swapped response mailbox back to the user */
3396 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3397 	/* if there is any non-embedded extended data copy that too */
3398 	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3399 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3400 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3401 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3402 		pmbx = (uint8_t *)dmabuf->virt;
3403 		/* byte swap the extended data following the mailbox command */
3404 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3405 			&pmbx[sizeof(MAILBOX_t)],
3406 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3407 	}
3408 
3409 	job = dd_data->context_un.mbox.set_job;
3410 	if (job) {
3411 		size = job->reply_payload.payload_len;
3412 		job->reply->reply_payload_rcv_len =
3413 			sg_copy_from_buffer(job->reply_payload.sg_list,
3414 					    job->reply_payload.sg_cnt,
3415 					    pmb_buf, size);
3416 		/* result for success */
3417 		job->reply->result = 0;
3418 		job->dd_data = NULL;
3419 		/* need to hold the lock until we set job->dd_data to NULL
3420 		 * to hold off the timeout handler in the mid-layer from
3421 		 * taking any action.
3422 		 */
3423 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3424 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3425 				"2937 SLI_CONFIG ext-buffer maibox command "
3426 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3427 				phba->mbox_ext_buf_ctx.nembType,
3428 				phba->mbox_ext_buf_ctx.mboxType, size);
3429 		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3430 					phba->mbox_ext_buf_ctx.nembType,
3431 					phba->mbox_ext_buf_ctx.mboxType,
3432 					dma_ebuf, sta_pos_addr,
3433 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3434 	} else
3435 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3436 
3437 job_done_out:
3438 	if (!job)
3439 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3440 				"2938 SLI_CONFIG ext-buffer maibox "
3441 				"command (x%x/x%x) failure, rc:x%x\n",
3442 				phba->mbox_ext_buf_ctx.nembType,
3443 				phba->mbox_ext_buf_ctx.mboxType, rc);
3444 	/* state change */
3445 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3446 	kfree(dd_data);
3447 
3448 	return job;
3449 }
3450 
3451 /**
3452  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3453  * @phba: Pointer to HBA context object.
3454  * @pmboxq: Pointer to mailbox command.
3455  *
3456  * This is the completion handler function for mailbox read commands with
3457  * multiple external buffers.
3458  **/
3459 static void
3460 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3461 {
3462 	struct fc_bsg_job *job;
3463 
3464 	/* handle the BSG job with mailbox command */
3465 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3466 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3467 
3468 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3469 			"2939 SLI_CONFIG ext-buffer rd maibox command "
3470 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3471 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3472 
3473 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3474 
3475 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3476 		lpfc_bsg_mbox_ext_session_reset(phba);
3477 
3478 	/* free base driver mailbox structure memory */
3479 	mempool_free(pmboxq, phba->mbox_mem_pool);
3480 
3481 	/* complete the bsg job if we have it */
3482 	if (job)
3483 		job->job_done(job);
3484 
3485 	return;
3486 }
3487 
3488 /**
3489  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3490  * @phba: Pointer to HBA context object.
3491  * @pmboxq: Pointer to mailbox command.
3492  *
3493  * This is completion handler function for mailbox write commands with multiple
3494  * external buffers.
3495  **/
3496 static void
3497 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3498 {
3499 	struct fc_bsg_job *job;
3500 
3501 	/* handle the BSG job with the mailbox command */
3502 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3503 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3504 
3505 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3506 			"2940 SLI_CONFIG ext-buffer wr maibox command "
3507 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3508 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3509 
3510 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3511 
3512 	/* free all memory, including dma buffers */
3513 	mempool_free(pmboxq, phba->mbox_mem_pool);
3514 	lpfc_bsg_mbox_ext_session_reset(phba);
3515 
3516 	/* complete the bsg job if we have it */
3517 	if (job)
3518 		job->job_done(job);
3519 
3520 	return;
3521 }
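
/*
 * Note: the two completion handlers above share one shape; a minimal
 * sketch of that pattern (illustrative only, not compiled driver code):
 *
 *	static void
 *	example_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 *	{
 *		struct fc_bsg_job *job;
 *
 *		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
 *			pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 *		job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *		if (job)
 *			job->job_done(job);
 *	}
 *
 * They differ only in when the session resets: the read handler keeps
 * the session alive while external buffers remain to be drained, while
 * the write handler always resets because every buffer has already
 * arrived by the time the mailbox command is issued.
 */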
3522 
3523 static void
3524 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3525 				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3526 				struct lpfc_dmabuf *ext_dmabuf)
3527 {
3528 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3529 
3530 	/* pointer to the start of mailbox command */
3531 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3532 
3533 	if (nemb_tp == nemb_mse) {
3534 		if (index == 0) {
3535 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3536 				mse[index].pa_hi =
3537 				putPaddrHigh(mbx_dmabuf->phys +
3538 					     sizeof(MAILBOX_t));
3539 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3540 				mse[index].pa_lo =
3541 				putPaddrLow(mbx_dmabuf->phys +
3542 					    sizeof(MAILBOX_t));
3543 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3544 					"2943 SLI_CONFIG(mse)[%d], "
3545 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3546 					index,
3547 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3548 					mse[index].buf_len,
3549 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3550 					mse[index].pa_hi,
3551 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3552 					mse[index].pa_lo);
3553 		} else {
3554 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3555 				mse[index].pa_hi =
3556 				putPaddrHigh(ext_dmabuf->phys);
3557 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3558 				mse[index].pa_lo =
3559 				putPaddrLow(ext_dmabuf->phys);
3560 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3561 					"2944 SLI_CONFIG(mse)[%d], "
3562 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3563 					index,
3564 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3565 					mse[index].buf_len,
3566 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3567 					mse[index].pa_hi,
3568 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3569 					mse[index].pa_lo);
3570 		}
3571 	} else {
3572 		if (index == 0) {
3573 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3574 				hbd[index].pa_hi =
3575 				putPaddrHigh(mbx_dmabuf->phys +
3576 					     sizeof(MAILBOX_t));
3577 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3578 				hbd[index].pa_lo =
3579 				putPaddrLow(mbx_dmabuf->phys +
3580 					    sizeof(MAILBOX_t));
3581 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3582 					"3007 SLI_CONFIG(hbd)[%d], "
3583 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3584 				index,
3585 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3586 				&sli_cfg_mbx->un.
3587 				sli_config_emb1_subsys.hbd[index]),
3588 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3589 				hbd[index].pa_hi,
3590 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3591 				hbd[index].pa_lo);
3592 
3593 		} else {
3594 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3595 				hbd[index].pa_hi =
3596 				putPaddrHigh(ext_dmabuf->phys);
3597 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3598 				hbd[index].pa_lo =
3599 				putPaddrLow(ext_dmabuf->phys);
3600 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3601 					"3008 SLI_CONFIG(hbd)[%d], "
3602 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3603 				index,
3604 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3605 				&sli_cfg_mbx->un.
3606 				sli_config_emb1_subsys.hbd[index]),
3607 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3608 				hbd[index].pa_hi,
3609 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3610 				hbd[index].pa_lo);
3611 		}
3612 	}
3613 	return;
3614 }
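
/*
 * Illustrative layout for the descriptor setup above (a sketch, assuming
 * one DMA page per buffer as allocated by lpfc_bsg_dma_page_alloc):
 *
 *	index 0: the payload shares the mailbox page, so the descriptor
 *	         address is mbx_dmabuf->phys + sizeof(MAILBOX_t)
 *	index N: (N > 0) each buffer has its own page, so the descriptor
 *	         address is simply ext_dmabuf->phys
 *
 * In both cases the 64-bit physical address is split with
 * putPaddrHigh()/putPaddrLow() into the pa_hi/pa_lo fields of either the
 * mse[] descriptor (nemb_mse) or the hbd[] descriptor (nemb_hbd).
 */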
3615 
3616 /**
3617  * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3618  * @phba: Pointer to HBA context object.
3619  * @job: Pointer to the fc_bsg_job object.
3620  * @nemb_tp: Enumeration of the non-embedded mailbox command type.
3621  * @dmabuf: Pointer to a DMA buffer descriptor.
3622  *
3623  * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3624  * non-embedded external buffers.
3625  **/
3626 static int
3627 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3628 			      enum nemb_type nemb_tp,
3629 			      struct lpfc_dmabuf *dmabuf)
3630 {
3631 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3632 	struct dfc_mbox_req *mbox_req;
3633 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3634 	uint32_t ext_buf_cnt, ext_buf_index;
3635 	struct lpfc_dmabuf *ext_dmabuf = NULL;
3636 	struct bsg_job_data *dd_data = NULL;
3637 	LPFC_MBOXQ_t *pmboxq = NULL;
3638 	MAILBOX_t *pmb;
3639 	uint8_t *pmbx;
3640 	int rc, i;
3641 
3642 	mbox_req =
3643 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3644 
3645 	/* pointer to the start of mailbox command */
3646 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3647 
3648 	if (nemb_tp == nemb_mse) {
3649 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3650 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3651 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3652 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3653 					"2945 Handled SLI_CONFIG(mse) rd, "
3654 					"ext_buf_cnt(%d) out of range(%d)\n",
3655 					ext_buf_cnt,
3656 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3657 			rc = -ERANGE;
3658 			goto job_error;
3659 		}
3660 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 				"2941 Handled SLI_CONFIG(mse) rd, "
3662 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3663 	} else {
3664 		/* sanity check on interface type for support */
3665 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3666 		    LPFC_SLI_INTF_IF_TYPE_2) {
3667 			rc = -ENODEV;
3668 			goto job_error;
3669 		}
3670 		/* nemb_tp == nemb_hbd */
3671 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3672 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3673 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3674 					"2946 Handled SLI_CONFIG(hbd) rd, "
3675 					"ext_buf_cnt(%d) out of range(%d)\n",
3676 					ext_buf_cnt,
3677 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3678 			rc = -ERANGE;
3679 			goto job_error;
3680 		}
3681 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3682 				"2942 Handled SLI_CONFIG(hbd) rd, "
3683 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3684 	}
3685 
3686 	/* before dma descriptor setup */
3687 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3688 					sta_pre_addr, dmabuf, ext_buf_cnt);
3689 
3690 	/* reject a non-embedded mailbox command with no external buffer */
3691 	if (ext_buf_cnt == 0) {
3692 		rc = -EPERM;
3693 		goto job_error;
3694 	} else if (ext_buf_cnt > 1) {
3695 		/* additional external read buffers */
3696 		for (i = 1; i < ext_buf_cnt; i++) {
3697 			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3698 			if (!ext_dmabuf) {
3699 				rc = -ENOMEM;
3700 				goto job_error;
3701 			}
3702 			list_add_tail(&ext_dmabuf->list,
3703 				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3704 		}
3705 	}
3706 
3707 	/* bsg tracking structure */
3708 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3709 	if (!dd_data) {
3710 		rc = -ENOMEM;
3711 		goto job_error;
3712 	}
3713 
3714 	/* mailbox command structure for base driver */
3715 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3716 	if (!pmboxq) {
3717 		rc = -ENOMEM;
3718 		goto job_error;
3719 	}
3720 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3721 
3722 	/* for the first external buffer */
3723 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3724 
3725 	/* for the rest of external buffer descriptors if any */
3726 	if (ext_buf_cnt > 1) {
3727 		ext_buf_index = 1;
3728 		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3729 				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3730 			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3731 						ext_buf_index, dmabuf,
3732 						curr_dmabuf);
3733 			ext_buf_index++;
3734 		}
3735 	}
3736 
3737 	/* after dma descriptor setup */
3738 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3739 					sta_pos_addr, dmabuf, ext_buf_cnt);
3740 
3741 	/* construct base driver mbox command */
3742 	pmb = &pmboxq->u.mb;
3743 	pmbx = (uint8_t *)dmabuf->virt;
3744 	memcpy(pmb, pmbx, sizeof(*pmb));
3745 	pmb->mbxOwner = OWN_HOST;
3746 	pmboxq->vport = phba->pport;
3747 
3748 	/* multi-buffer handling context */
3749 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3750 	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3751 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3752 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3753 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3754 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3755 
3756 	/* callback for multi-buffer read mailbox command */
3757 	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3758 
3759 	/* context fields to callback function */
3760 	pmboxq->context1 = dd_data;
3761 	dd_data->type = TYPE_MBOX;
3762 	dd_data->context_un.mbox.pmboxq = pmboxq;
3763 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3764 	dd_data->context_un.mbox.set_job = job;
3765 	job->dd_data = dd_data;
3766 
3767 	/* state change */
3768 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3769 
3770 	/*
3771 	 * Non-embedded mailbox subcommand data gets byte swapped here because
3772 	 * the lower level driver code only does the first 64 mailbox words.
3773 	 */
3774 	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3775 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3776 		(nemb_tp == nemb_mse))
3777 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3778 			&pmbx[sizeof(MAILBOX_t)],
3779 				sli_cfg_mbx->un.sli_config_emb0_subsys.
3780 					mse[0].buf_len);
3781 
3782 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3783 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3784 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3785 				"2947 Issued SLI_CONFIG ext-buffer "
3786 				"maibox command, rc:x%x\n", rc);
3787 		return SLI_CONFIG_HANDLED;
3788 	}
3789 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3790 			"2948 Failed to issue SLI_CONFIG ext-buffer "
3791 			"maibox command, rc:x%x\n", rc);
3792 	rc = -EPIPE;
3793 
3794 job_error:
3795 	if (pmboxq)
3796 		mempool_free(pmboxq, phba->mbox_mem_pool);
3797 	lpfc_bsg_dma_page_list_free(phba,
3798 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3799 	kfree(dd_data);
3800 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3801 	return rc;
3802 }
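
/*
 * Summary sketch of the read path implemented above (descriptive, not
 * normative):
 *
 *	1. range-check ext_buf_cnt from the mse count or hbd_count field
 *	2. allocate one extra DMA page per additional external buffer
 *	3. point descriptor[0] just past the MAILBOX_t in the mailbox page
 *	   and descriptors [1..n-1] at the extra pages
 *	4. issue the mailbox MBX_NOWAIT; on MBX_SUCCESS/MBX_BUSY the job
 *	   completes later from lpfc_bsg_issue_read_mbox_ext_cmpl
 *	5. user space drains buffers 2..n through follow-up BSG requests
 *	   handled by lpfc_bsg_read_ebuf_get()
 *
 * Any failure takes the job_error path, which frees pmboxq, the extra
 * DMA pages, and dd_data, and returns the session to LPFC_BSG_MBOX_IDLE.
 */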
3803 
3804 /**
3805  * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3806  * @phba: Pointer to HBA context object.
3807  * @job: Pointer to the fc_bsg_job object.
3808  * @dmabuf: Pointer to a DMA buffer descriptor.
3809  *
3810  * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3811  * non-embedded external buffers.
3812  **/
3813 static int
3814 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3815 			       enum nemb_type nemb_tp,
3816 			       struct lpfc_dmabuf *dmabuf)
3817 {
3818 	struct dfc_mbox_req *mbox_req;
3819 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3820 	uint32_t ext_buf_cnt;
3821 	struct bsg_job_data *dd_data = NULL;
3822 	LPFC_MBOXQ_t *pmboxq = NULL;
3823 	MAILBOX_t *pmb;
3824 	uint8_t *mbx;
3825 	int rc = SLI_CONFIG_NOT_HANDLED, i;
3826 
3827 	mbox_req =
3828 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3829 
3830 	/* pointer to the start of mailbox command */
3831 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3832 
3833 	if (nemb_tp == nemb_mse) {
3834 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3835 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3836 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3837 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3838 					"2953 Failed SLI_CONFIG(mse) wr, "
3839 					"ext_buf_cnt(%d) out of range(%d)\n",
3840 					ext_buf_cnt,
3841 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3842 			return -ERANGE;
3843 		}
3844 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3845 				"2949 Handled SLI_CONFIG(mse) wr, "
3846 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3847 	} else {
3848 		/* sanity check on interface type for support */
3849 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3850 		    LPFC_SLI_INTF_IF_TYPE_2)
3851 			return -ENODEV;
3852 		/* nemb_tp == nemb_hbd */
3853 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3854 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3855 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3856 					"2954 Failed SLI_CONFIG(hbd) wr, "
3857 					"ext_buf_cnt(%d) out of range(%d)\n",
3858 					ext_buf_cnt,
3859 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3860 			return -ERANGE;
3861 		}
3862 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3863 				"2950 Handled SLI_CONFIG(hbd) wr, "
3864 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3865 	}
3866 
3867 	/* before dma buffer descriptor setup */
3868 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3869 					sta_pre_addr, dmabuf, ext_buf_cnt);
3870 
3871 	if (ext_buf_cnt == 0)
3872 		return -EPERM;
3873 
3874 	/* for the first external buffer */
3875 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3876 
3877 	/* after dma descriptor setup */
3878 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3879 					sta_pos_addr, dmabuf, ext_buf_cnt);
3880 
3881 	/* log the lengths of the additional external buffers */
3882 	for (i = 1; i < ext_buf_cnt; i++) {
3883 		if (nemb_tp == nemb_mse)
3884 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3885 				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3886 				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3887 				mse[i].buf_len);
3888 		else
3889 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3890 				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3891 				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3892 				&sli_cfg_mbx->un.sli_config_emb1_subsys.
3893 				hbd[i]));
3894 	}
3895 
3896 	/* multi-buffer handling context */
3897 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3898 	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3899 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3900 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3901 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3902 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3903 
3904 	if (ext_buf_cnt == 1) {
3905 		/* bsg tracking structure */
3906 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3907 		if (!dd_data) {
3908 			rc = -ENOMEM;
3909 			goto job_error;
3910 		}
3911 
3912 		/* mailbox command structure for base driver */
3913 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3914 		if (!pmboxq) {
3915 			rc = -ENOMEM;
3916 			goto job_error;
3917 		}
3918 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3919 		pmb = &pmboxq->u.mb;
3920 		mbx = (uint8_t *)dmabuf->virt;
3921 		memcpy(pmb, mbx, sizeof(*pmb));
3922 		pmb->mbxOwner = OWN_HOST;
3923 		pmboxq->vport = phba->pport;
3924 
3925 		/* callback for multi-buffer write mailbox command */
3926 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3927 
3928 		/* context fields to callback function */
3929 		pmboxq->context1 = dd_data;
3930 		dd_data->type = TYPE_MBOX;
3931 		dd_data->context_un.mbox.pmboxq = pmboxq;
3932 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3933 		dd_data->context_un.mbox.set_job = job;
3934 		job->dd_data = dd_data;
3935 
3936 		/* state change */
3937 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3938 
3939 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3940 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3941 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3942 					"2955 Issued SLI_CONFIG ext-buffer "
3943 					"maibox command, rc:x%x\n", rc);
3944 			return SLI_CONFIG_HANDLED;
3945 		}
3946 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3947 				"2956 Failed to issue SLI_CONFIG ext-buffer "
3948 				"maibox command, rc:x%x\n", rc);
3949 		rc = -EPIPE;
3950 		goto job_error;
3951 	}
3952 
3953 	/* wait for additional external buffers */
3954 	job->reply->result = 0;
3955 	job->job_done(job);
3956 	return SLI_CONFIG_HANDLED;
3957 
3958 job_error:
3959 	if (pmboxq)
3960 		mempool_free(pmboxq, phba->mbox_mem_pool);
3961 	kfree(dd_data);
3962 
3963 	return rc;
3964 }
3965 
3966 /**
3967  * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3968  * @phba: Pointer to HBA context object.
3969  * @job: Pointer to the fc_bsg_job object.
3970  * @dmabuf: Pointer to a DMA buffer descriptor.
3971  *
3972  * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
3973  * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3974  * with embedded subsystem 0x1 and opcodes with external HBDs.
3975  **/
3976 static int
3977 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3978 			     struct lpfc_dmabuf *dmabuf)
3979 {
3980 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3981 	uint32_t subsys;
3982 	uint32_t opcode;
3983 	int rc = SLI_CONFIG_NOT_HANDLED;
3984 
3985 	/* state change on new multi-buffer pass-through mailbox command */
3986 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3987 
3988 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3989 
3990 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3991 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3992 		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3993 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
3994 		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3995 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
3996 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3997 			switch (opcode) {
3998 			case FCOE_OPCODE_READ_FCF:
3999 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4000 						"2957 Handled SLI_CONFIG "
4001 						"subsys_fcoe, opcode:x%x\n",
4002 						opcode);
4003 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4004 							nemb_mse, dmabuf);
4005 				break;
4006 			case FCOE_OPCODE_ADD_FCF:
4007 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4008 						"2958 Handled SLI_CONFIG "
4009 						"subsys_fcoe, opcode:x%x\n",
4010 						opcode);
4011 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4012 							nemb_mse, dmabuf);
4013 				break;
4014 			default:
4015 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4016 						"2959 Reject SLI_CONFIG "
4017 						"subsys_fcoe, opcode:x%x\n",
4018 						opcode);
4019 				rc = -EPERM;
4020 				break;
4021 			}
4022 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4023 			switch (opcode) {
4024 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4025 			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4026 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4027 						"3106 Handled SLI_CONFIG "
4028 						"subsys_comn, opcode:x%x\n",
4029 						opcode);
4030 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4031 							nemb_mse, dmabuf);
4032 				break;
4033 			default:
4034 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4035 						"3107 Reject SLI_CONFIG "
4036 						"subsys_comn, opcode:x%x\n",
4037 						opcode);
4038 				rc = -EPERM;
4039 				break;
4040 			}
4041 		} else {
4042 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4043 					"2977 Reject SLI_CONFIG "
4044 					"subsys:x%d, opcode:x%x\n",
4045 					subsys, opcode);
4046 			rc = -EPERM;
4047 		}
4048 	} else {
4049 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4050 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4051 		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4052 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4053 		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4054 			switch (opcode) {
4055 			case COMN_OPCODE_READ_OBJECT:
4056 			case COMN_OPCODE_READ_OBJECT_LIST:
4057 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4058 						"2960 Handled SLI_CONFIG "
4059 						"subsys_comn, opcode:x%x\n",
4060 						opcode);
4061 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4062 							nemb_hbd, dmabuf);
4063 				break;
4064 			case COMN_OPCODE_WRITE_OBJECT:
4065 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4066 						"2961 Handled SLI_CONFIG "
4067 						"subsys_comn, opcode:x%x\n",
4068 						opcode);
4069 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4070 							nemb_hbd, dmabuf);
4071 				break;
4072 			default:
4073 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4074 						"2962 Not handled SLI_CONFIG "
4075 						"subsys_comn, opcode:x%x\n",
4076 						opcode);
4077 				rc = SLI_CONFIG_NOT_HANDLED;
4078 				break;
4079 			}
4080 		} else {
4081 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4082 					"2978 Not handled SLI_CONFIG "
4083 					"subsys:x%d, opcode:x%x\n",
4084 					subsys, opcode);
4085 			rc = SLI_CONFIG_NOT_HANDLED;
4086 		}
4087 	}
4088 
4089 	/* state reset on not handled new multi-buffer mailbox command */
4090 	if (rc != SLI_CONFIG_HANDLED)
4091 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4092 
4093 	return rc;
4094 }
4095 
4096 /**
4097  * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4098  * @phba: Pointer to HBA context object.
4099  *
4100  * This routine is for requesting to abort a pass-through mailbox command with
4101  * multiple external buffers due to error condition.
4102  **/
4103 static void
4104 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4105 {
4106 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4107 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4108 	else
4109 		lpfc_bsg_mbox_ext_session_reset(phba);
4110 	return;
4111 }
4112 
4113 /**
4114  * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4115  * @phba: Pointer to HBA context object.
4116  * @job: Pointer to the fc_bsg_job object.
4117  *
4118  * This routine returns the next mailbox read external buffer to
4119  * user space through BSG.
4120  **/
4121 static int
4122 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4123 {
4124 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4125 	struct lpfc_dmabuf *dmabuf;
4126 	uint8_t *pbuf;
4127 	uint32_t size;
4128 	uint32_t index;
4129 
4130 	index = phba->mbox_ext_buf_ctx.seqNum;
4131 	phba->mbox_ext_buf_ctx.seqNum++;
4132 
4133 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4134 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4135 
4136 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4137 		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4138 			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4139 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4140 				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4141 				"buffer[%d], size:%d\n", index, size);
4142 	} else {
4143 		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4144 			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4145 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4146 				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4147 				"buffer[%d], size:%d\n", index, size);
4148 	}
4149 	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4150 		return -EPIPE;
4151 	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4152 				  struct lpfc_dmabuf, list);
4153 	list_del_init(&dmabuf->list);
4154 
4155 	/* after dma buffer descriptor setup */
4156 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4157 					mbox_rd, dma_ebuf, sta_pos_addr,
4158 					dmabuf, index);
4159 
4160 	pbuf = (uint8_t *)dmabuf->virt;
4161 	job->reply->reply_payload_rcv_len =
4162 		sg_copy_from_buffer(job->reply_payload.sg_list,
4163 				    job->reply_payload.sg_cnt,
4164 				    pbuf, size);
4165 
4166 	lpfc_bsg_dma_page_free(phba, dmabuf);
4167 
4168 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4169 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4170 				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4171 				"command session done\n");
4172 		lpfc_bsg_mbox_ext_session_reset(phba);
4173 	}
4174 
4175 	job->reply->result = 0;
4176 	job->job_done(job);
4177 
4178 	return SLI_CONFIG_HANDLED;
4179 }
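
/*
 * Usage sketch for the drain above (a hypothetical user-space view; the
 * field names come from struct dfc_mbox_req): after the read mailbox
 * completes, the application repeats the vendor command with the same
 * extMboxTag and extSeqNum incremented by one; each call copies out one
 * external buffer, and when seqNum reaches numBuf the session resets.
 */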
4180 
4181 /**
4182  * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4183  * @phba: Pointer to HBA context object.
4184  * @dmabuf: Pointer to a DMA buffer descriptor.
4185  *
4186  * This routine sets up the next mailbox write external buffer obtained
4187  * from user space through BSG.
4188  **/
4189 static int
4190 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4191 			struct lpfc_dmabuf *dmabuf)
4192 {
4193 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4194 	struct bsg_job_data *dd_data = NULL;
4195 	LPFC_MBOXQ_t *pmboxq = NULL;
4196 	MAILBOX_t *pmb;
4197 	enum nemb_type nemb_tp;
4198 	uint8_t *pbuf;
4199 	uint32_t size;
4200 	uint32_t index;
4201 	int rc;
4202 
4203 	index = phba->mbox_ext_buf_ctx.seqNum;
4204 	phba->mbox_ext_buf_ctx.seqNum++;
4205 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4206 
4207 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4208 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4209 
4210 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4211 	if (!dd_data) {
4212 		rc = -ENOMEM;
4213 		goto job_error;
4214 	}
4215 
4216 	pbuf = (uint8_t *)dmabuf->virt;
4217 	size = job->request_payload.payload_len;
4218 	sg_copy_to_buffer(job->request_payload.sg_list,
4219 			  job->request_payload.sg_cnt,
4220 			  pbuf, size);
4221 
4222 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4223 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4224 				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4225 				"buffer[%d], size:%d\n",
4226 				phba->mbox_ext_buf_ctx.seqNum, size);
4227 
4228 	} else {
4229 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4230 				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4231 				"buffer[%d], size:%d\n",
4232 				phba->mbox_ext_buf_ctx.seqNum, size);
4233 
4234 	}
4235 
4236 	/* set up external buffer descriptor and add to external buffer list */
4237 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4238 					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4239 					dmabuf);
4240 	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4241 
4242 	/* after write dma buffer */
4243 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4244 					mbox_wr, dma_ebuf, sta_pos_addr,
4245 					dmabuf, index);
4246 
4247 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4248 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4249 				"2968 SLI_CONFIG ext-buffer wr all %d "
4250 				"ebuffers received\n",
4251 				phba->mbox_ext_buf_ctx.numBuf);
4252 		/* mailbox command structure for base driver */
4253 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4254 		if (!pmboxq) {
4255 			rc = -ENOMEM;
4256 			goto job_error;
4257 		}
4258 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4259 		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4260 		pmb = &pmboxq->u.mb;
4261 		memcpy(pmb, pbuf, sizeof(*pmb));
4262 		pmb->mbxOwner = OWN_HOST;
4263 		pmboxq->vport = phba->pport;
4264 
4265 		/* callback for multi-buffer write mailbox command */
4266 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4267 
4268 		/* context fields to callback function */
4269 		pmboxq->context1 = dd_data;
4270 		dd_data->type = TYPE_MBOX;
4271 		dd_data->context_un.mbox.pmboxq = pmboxq;
4272 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4273 		dd_data->context_un.mbox.set_job = job;
4274 		job->dd_data = dd_data;
4275 
4276 		/* state change */
4277 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4278 
4279 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4280 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4281 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4282 					"2969 Issued SLI_CONFIG ext-buffer "
4283 					"maibox command, rc:x%x\n", rc);
4284 			return SLI_CONFIG_HANDLED;
4285 		}
4286 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4287 				"2970 Failed to issue SLI_CONFIG ext-buffer "
4288 				"maibox command, rc:x%x\n", rc);
4289 		rc = -EPIPE;
4290 		goto job_error;
4291 	}
4292 
4293 	/* wait for additional external buffers */
4294 	job->reply->result = 0;
4295 	job->job_done(job);
4296 	return SLI_CONFIG_HANDLED;
4297 
4298 job_error:
4299 	lpfc_bsg_dma_page_free(phba, dmabuf);
4300 	kfree(dd_data);
4301 
4302 	return rc;
4303 }
4304 
4305 /**
4306  * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4307  * @phba: Pointer to HBA context object.
4308  * @job: Pointer to the fc_bsg_job object.
4309  * @dmabuf: Pointer to a DMA buffer descriptor.
4310  *
4311  * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4312  * command with multiple non-embedded external buffers.
4313  **/
4314 static int
4315 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4316 			     struct lpfc_dmabuf *dmabuf)
4317 {
4318 	int rc;
4319 
4320 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4321 			"2971 SLI_CONFIG buffer (type:x%x)\n",
4322 			phba->mbox_ext_buf_ctx.mboxType);
4323 
4324 	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4325 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4326 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4327 					"2972 SLI_CONFIG rd buffer state "
4328 					"mismatch:x%x\n",
4329 					phba->mbox_ext_buf_ctx.state);
4330 			lpfc_bsg_mbox_ext_abort(phba);
4331 			return -EPIPE;
4332 		}
4333 		rc = lpfc_bsg_read_ebuf_get(phba, job);
4334 		if (rc == SLI_CONFIG_HANDLED)
4335 			lpfc_bsg_dma_page_free(phba, dmabuf);
4336 	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4337 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4338 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4339 					"2973 SLI_CONFIG wr buffer state "
4340 					"mismatch:x%x\n",
4341 					phba->mbox_ext_buf_ctx.state);
4342 			lpfc_bsg_mbox_ext_abort(phba);
4343 			return -EPIPE;
4344 		}
4345 		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4346 	}
4347 	return rc;
4348 }
4349 
4350 /**
4351  * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4352  * @phba: Pointer to HBA context object.
4353  * @job: Pointer to the fc_bsg_job object.
4354  * @dmabuf: Pointer to a DMA buffer descriptor.
4355  *
4356  * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4357  * (0x9B) mailbox commands and external buffers.
4358  **/
4359 static int
4360 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4361 			    struct lpfc_dmabuf *dmabuf)
4362 {
4363 	struct dfc_mbox_req *mbox_req;
4364 	int rc = SLI_CONFIG_NOT_HANDLED;
4365 
4366 	mbox_req =
4367 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4368 
4369 	/* mbox command with/without single external buffer */
4370 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4371 		return rc;
4372 
4373 	/* mbox command and first external buffer */
4374 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4375 		if (mbox_req->extSeqNum == 1) {
4376 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4377 					"2974 SLI_CONFIG mailbox: tag:%d, "
4378 					"seq:%d\n", mbox_req->extMboxTag,
4379 					mbox_req->extSeqNum);
4380 			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4381 			return rc;
4382 		} else
4383 			goto sli_cfg_ext_error;
4384 	}
4385 
4386 	/*
4387 	 * handle additional external buffers
4388 	 */
4389 
4390 	/* check broken pipe conditions */
4391 	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4392 		goto sli_cfg_ext_error;
4393 	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4394 		goto sli_cfg_ext_error;
4395 	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4396 		goto sli_cfg_ext_error;
4397 
4398 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4399 			"2975 SLI_CONFIG mailbox external buffer: "
4400 			"extSta:x%x, tag:%d, seq:%d\n",
4401 			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4402 			mbox_req->extSeqNum);
4403 	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4404 	return rc;
4405 
4406 sli_cfg_ext_error:
4407 	/* all other cases, broken pipe */
4408 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4409 			"2976 SLI_CONFIG mailbox broken pipe: "
4410 			"ctxSta:x%x, ctxNumBuf:%d "
4411 			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4412 			phba->mbox_ext_buf_ctx.state,
4413 			phba->mbox_ext_buf_ctx.numBuf,
4414 			phba->mbox_ext_buf_ctx.mbxTag,
4415 			phba->mbox_ext_buf_ctx.seqNum,
4416 			mbox_req->extMboxTag, mbox_req->extSeqNum);
4417 
4418 	lpfc_bsg_mbox_ext_session_reset(phba);
4419 
4420 	return -EPIPE;
4421 }
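
/*
 * Multi-buffer session protocol, as enforced by the checks above (an
 * illustrative trace; T and n stand for an arbitrary tag and count):
 *
 *	request 1: extMboxTag = T, extSeqNum = 1  (mailbox + buffer 1)
 *	request 2: extMboxTag = T, extSeqNum = 2  (buffer 2)
 *	...
 *	request n: extMboxTag = T, extSeqNum = n  (buffer n)
 *
 * A tag mismatch, a sequence number beyond numBuf, or a sequence number
 * other than seqNum + 1 is treated as a broken pipe: the session is
 * reset and -EPIPE is returned to the caller.
 */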
4422 
4423 /**
4424  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4425  * @phba: Pointer to HBA context object.
4426  * @job: Pointer to the fc_bsg_job object.
4427  * @vport: Pointer to a vport object.
4428  *
4429  * Allocate a tracking object, mailbox command memory, get a mailbox
4430  * from the mailbox pool, copy the caller mailbox command.
4431  *
4432  * If offline and the sli is active we need to poll for the command (port is
4433  * being reset) and complete the job; otherwise issue the mailbox command and
4434  * let our completion handler finish the command.
4435  **/
4436 static uint32_t
4437 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4438 	struct lpfc_vport *vport)
4439 {
4440 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4441 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4442 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4443 	uint8_t *pmbx = NULL;
4444 	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4445 	struct lpfc_dmabuf *dmabuf = NULL;
4446 	struct dfc_mbox_req *mbox_req;
4447 	struct READ_EVENT_LOG_VAR *rdEventLog;
4448 	uint32_t transmit_length, receive_length, mode;
4449 	struct lpfc_mbx_sli4_config *sli4_config;
4450 	struct lpfc_mbx_nembed_cmd *nembed_sge;
4451 	struct mbox_header *header;
4452 	struct ulp_bde64 *bde;
4453 	uint8_t *ext = NULL;
4454 	int rc = 0;
4455 	uint8_t *from;
4456 	uint32_t size;
4457 
4459 	/* in case no data is transferred */
4460 	job->reply->reply_payload_rcv_len = 0;
4461 
4462 	/* sanity check to protect driver */
4463 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4464 	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4465 		rc = -ERANGE;
4466 		goto job_done;
4467 	}
4468 
4469 	/*
4470 	 * Don't allow mailbox commands to be sent when blocked or when in
4471 	 * the middle of discovery
4472 	 */
4473 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4474 		rc = -EAGAIN;
4475 		goto job_done;
4476 	}
4477 
4478 	mbox_req =
4479 	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4480 
4481 	/* check if requested extended data lengths are valid */
4482 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4483 	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4484 		rc = -ERANGE;
4485 		goto job_done;
4486 	}
4487 
4488 	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4489 	if (!dmabuf || !dmabuf->virt) {
4490 		rc = -ENOMEM;
4491 		goto job_done;
4492 	}
4493 
4494 	/* Get the mailbox command or external buffer from BSG */
4495 	pmbx = (uint8_t *)dmabuf->virt;
4496 	size = job->request_payload.payload_len;
4497 	sg_copy_to_buffer(job->request_payload.sg_list,
4498 			  job->request_payload.sg_cnt, pmbx, size);
4499 
4500 	/* Handle possible SLI_CONFIG with non-embedded payloads */
4501 	if (phba->sli_rev == LPFC_SLI_REV4) {
4502 		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4503 		if (rc == SLI_CONFIG_HANDLED)
4504 			goto job_cont;
4505 		if (rc)
4506 			goto job_done;
4507 		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4508 	}
4509 
4510 	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4511 	if (rc != 0)
4512 		goto job_done; /* must be negative */
4513 
4514 	/* allocate our bsg tracking structure */
4515 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4516 	if (!dd_data) {
4517 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4518 				"2727 Failed allocation of dd_data\n");
4519 		rc = -ENOMEM;
4520 		goto job_done;
4521 	}
4522 
4523 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4524 	if (!pmboxq) {
4525 		rc = -ENOMEM;
4526 		goto job_done;
4527 	}
4528 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4529 
4530 	pmb = &pmboxq->u.mb;
4531 	memcpy(pmb, pmbx, sizeof(*pmb));
4532 	pmb->mbxOwner = OWN_HOST;
4533 	pmboxq->vport = vport;
4534 
4535 	/* If HBA encountered an error attention, allow only DUMP
4536 	 * or RESTART mailbox commands until the HBA is restarted.
4537 	 */
4538 	if (phba->pport->stopped &&
4539 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4540 	    pmb->mbxCommand != MBX_RESTART &&
4541 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4542 	    pmb->mbxCommand != MBX_WRITE_WWN)
4543 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4544 				"2797 mbox: Issued mailbox cmd "
4545 				"0x%x while in stopped state.\n",
4546 				pmb->mbxCommand);
4547 
4548 	/* extended mailbox commands will need an extended buffer */
4549 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4550 		from = pmbx;
4551 		ext = from + sizeof(MAILBOX_t);
4552 		pmboxq->context2 = ext;
4553 		pmboxq->in_ext_byte_len =
4554 			mbox_req->inExtWLen * sizeof(uint32_t);
4555 		pmboxq->out_ext_byte_len =
4556 			mbox_req->outExtWLen * sizeof(uint32_t);
4557 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4558 	}
4559 
4560 	/* biu diag will need a kernel buffer to transfer the data
4561 	 * allocate our own buffer and setup the mailbox command to
4562 	 * use ours
4563 	 */
4564 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4565 		transmit_length = pmb->un.varWords[1];
4566 		receive_length = pmb->un.varWords[4];
4567 		/* transmit length cannot be greater than receive length or
4568 		 * mailbox extension size
4569 		 */
4570 		if ((transmit_length > receive_length) ||
4571 			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4572 			rc = -ERANGE;
4573 			goto job_done;
4574 		}
4575 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4576 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4577 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4578 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4579 
4580 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4581 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4582 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4583 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4584 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4585 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4586 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4587 		rdEventLog = &pmb->un.varRdEventLog;
4588 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4589 		mode = bf_get(lpfc_event_log, rdEventLog);
4590 
4591 		/* receive length cannot be greater than mailbox
4592 		 * extension size
4593 		 */
4594 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4595 			rc = -ERANGE;
4596 			goto job_done;
4597 		}
4598 
4599 		/* mode zero uses a bde like biu diags command */
4600 		if (mode == 0) {
4601 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4602 							+ sizeof(MAILBOX_t));
4603 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4604 							+ sizeof(MAILBOX_t));
4605 		}
4606 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4607 		/* Let type 4 (well known data) through because the data is
4608 		 * returned in varwords[4-8];
4609 		 * otherwise check the receive length and fetch the buffer addr
4610 		 */
4611 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4612 			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4613 			/* rebuild the command for sli4 using our own buffers
4614 			 * like we do for biu diags
4615 			 */
4616 			receive_length = pmb->un.varWords[2];
4617 			/* a dump request with a zero receive length
4618 			 * is invalid
4619 			 */
4620 			if (receive_length == 0) {
4621 				rc = -ERANGE;
4622 				goto job_done;
4623 			}
4624 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4625 						+ sizeof(MAILBOX_t));
4626 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4627 						+ sizeof(MAILBOX_t));
4628 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4629 			pmb->un.varUpdateCfg.co) {
4630 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4631 
4632 			/* bde size cannot be greater than mailbox ext size */
4633 			if (bde->tus.f.bdeSize >
4634 			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4635 				rc = -ERANGE;
4636 				goto job_done;
4637 			}
4638 			bde->addrHigh = putPaddrHigh(dmabuf->phys
4639 						+ sizeof(MAILBOX_t));
4640 			bde->addrLow = putPaddrLow(dmabuf->phys
4641 						+ sizeof(MAILBOX_t));
4642 		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4643 			/* Handling non-embedded SLI_CONFIG mailbox command */
4644 			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4645 			if (!bf_get(lpfc_mbox_hdr_emb,
4646 			    &sli4_config->header.cfg_mhdr)) {
4647 				/* rebuild the command for sli4 using our
4648 				 * own buffers like we do for biu diags
4649 				 */
4650 				header = (struct mbox_header *)
4651 						&pmb->un.varWords[0];
4652 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4653 						&pmb->un.varWords[0];
4654 				receive_length = nembed_sge->sge[0].length;
4655 
4656 				/* receive length cannot be greater than
4657 				 * mailbox extension size
4658 				 */
4659 				if ((receive_length == 0) ||
4660 				    (receive_length >
4661 				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4662 					rc = -ERANGE;
4663 					goto job_done;
4664 				}
4665 
4666 				nembed_sge->sge[0].pa_hi =
4667 						putPaddrHigh(dmabuf->phys
4668 						   + sizeof(MAILBOX_t));
4669 				nembed_sge->sge[0].pa_lo =
4670 						putPaddrLow(dmabuf->phys
4671 						   + sizeof(MAILBOX_t));
4672 			}
4673 		}
4674 	}
4675 
4676 	dd_data->context_un.mbox.dmabuffers = dmabuf;
4677 
4678 	/* setup wake call as IOCB callback */
4679 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4680 
4681 	/* setup context field to pass wait_queue pointer to wake function */
4682 	pmboxq->context1 = dd_data;
4683 	dd_data->type = TYPE_MBOX;
4684 	dd_data->context_un.mbox.pmboxq = pmboxq;
4685 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4686 	dd_data->context_un.mbox.set_job = job;
4687 	dd_data->context_un.mbox.ext = ext;
4688 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4689 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4690 	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4691 	job->dd_data = dd_data;
4692 
4693 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4694 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4695 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4696 		if (rc != MBX_SUCCESS) {
4697 			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4698 			goto job_done;
4699 		}
4700 
4701 		/* job finished, copy the data */
4702 		memcpy(pmbx, pmb, sizeof(*pmb));
4703 		job->reply->reply_payload_rcv_len =
4704 			sg_copy_from_buffer(job->reply_payload.sg_list,
4705 					    job->reply_payload.sg_cnt,
4706 					    pmbx, size);
4707 		/* not waiting mbox already done */
4708 		rc = 0;
4709 		goto job_done;
4710 	}
4711 
4712 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4713 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4714 		return 1; /* job started */
4715 
4716 job_done:
4717 	/* common exit for error or job completed inline */
4718 	if (pmboxq)
4719 		mempool_free(pmboxq, phba->mbox_mem_pool);
4720 	lpfc_bsg_dma_page_free(phba, dmabuf);
4721 	kfree(dd_data);
4722 
4723 job_cont:
4724 	return rc;
4725 }
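
/*
 * Return contract of lpfc_bsg_issue_mbox(), as consumed by
 * lpfc_bsg_mbox_cmd() below:
 *
 *	0        - command completed inline (polled path); the caller
 *	           finishes the bsg job immediately
 *	1        - command issued asynchronously; the completion handler
 *	           finishes the job (the SLI_CONFIG multi-buffer path
 *	           returns its handler's result through job_cont)
 *	negative - error; the caller reports it in job->reply->result
 */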
4726 
4727 /**
4728  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4729  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4730  **/
4731 static int
4732 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4733 {
4734 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4735 	struct lpfc_hba *phba = vport->phba;
4736 	struct dfc_mbox_req *mbox_req;
4737 	int rc = 0;
4738 
4739 	/* mix-and-match backward compatibility */
4740 	job->reply->reply_payload_rcv_len = 0;
4741 	if (job->request_len <
4742 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4743 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4744 				"2737 Mix-and-match backward compability "
4745 				"between MBOX_REQ old size:%d and "
4746 				"new request size:%d\n",
4747 				(int)(job->request_len -
4748 				      sizeof(struct fc_bsg_request)),
4749 				(int)sizeof(struct dfc_mbox_req));
4750 		mbox_req = (struct dfc_mbox_req *)
4751 				job->request->rqst_data.h_vendor.vendor_cmd;
4752 		mbox_req->extMboxTag = 0;
4753 		mbox_req->extSeqNum = 0;
4754 	}
4755 
4756 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4757 
4758 	if (rc == 0) {
4759 		/* job done */
4760 		job->reply->result = 0;
4761 		job->dd_data = NULL;
4762 		job->job_done(job);
4763 	} else if (rc == 1)
4764 		/* job submitted, will complete later */
4765 		rc = 0; /* return zero, no error */
4766 	else {
4767 		/* some error occurred */
4768 		job->reply->result = rc;
4769 		job->dd_data = NULL;
4770 	}
4771 
4772 	return rc;
4773 }
4774 
4775 /**
4776  * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4777  * @phba: Pointer to HBA context object.
4778  * @cmdiocbq: Pointer to command iocb.
4779  * @rspiocbq: Pointer to response iocb.
4780  *
4781  * This function is the completion handler for iocbs issued using
4782  * the lpfc_menlo_cmd function. This function is called by the
4783  * ring event handler function without any lock held. This function
4784  * can be called from both worker thread context and interrupt
4785  * context. This function also can be called from another thread which
4786  * cleans up the SLI layer objects.
4787  * This function copies the contents of the response iocb to the
4788  * response iocb memory object saved in the bsg tracking structure,
4789  * releases the iocb and DMA resources, and completes the bsg job
4790  * back to userspace.
4791  **/
4792 static void
4793 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4794 			struct lpfc_iocbq *cmdiocbq,
4795 			struct lpfc_iocbq *rspiocbq)
4796 {
4797 	struct bsg_job_data *dd_data;
4798 	struct fc_bsg_job *job;
4799 	IOCB_t *rsp;
4800 	struct lpfc_dmabuf *bmp;
4801 	struct lpfc_bsg_menlo *menlo;
4802 	unsigned long flags;
4803 	struct menlo_response *menlo_resp;
4804 	int rc = 0;
4805 
4806 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
4807 	dd_data = cmdiocbq->context1;
4808 	if (!dd_data) {
4809 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4810 		return;
4811 	}
4812 
4813 	menlo = &dd_data->context_un.menlo;
4814 	job = menlo->set_job;
4815 	job->dd_data = NULL; /* so timeout handler does not reply */
4816 
4817 	spin_lock(&phba->hbalock);
4818 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4819 	if (cmdiocbq->context2 && rspiocbq)
4820 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4821 		       &rspiocbq->iocb, sizeof(IOCB_t));
4822 	spin_unlock(&phba->hbalock);
4823 
4824 	bmp = menlo->bmp;
4825 	rspiocbq = menlo->rspiocbq;
4826 	rsp = &rspiocbq->iocb;
4827 
4828 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4829 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
4830 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4831 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4832 
4833 	/* always return the xri, this would be used in the case
4834 	 * of a menlo download to allow the data to be sent as a continuation
4835 	 * of the exchange.
4836 	 */
4837 	menlo_resp = (struct menlo_response *)
4838 		job->reply->reply_data.vendor_reply.vendor_rsp;
4839 	menlo_resp->xri = rsp->ulpContext;
4840 	if (rsp->ulpStatus) {
4841 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4842 			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4843 			case IOERR_SEQUENCE_TIMEOUT:
4844 				rc = -ETIMEDOUT;
4845 				break;
4846 			case IOERR_INVALID_RPI:
4847 				rc = -EFAULT;
4848 				break;
4849 			default:
4850 				rc = -EACCES;
4851 				break;
4852 			}
4853 		} else
4854 			rc = -EACCES;
4855 	} else
4856 		job->reply->reply_payload_rcv_len =
4857 			rsp->un.genreq64.bdl.bdeSize;
4858 
4859 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4860 	lpfc_sli_release_iocbq(phba, rspiocbq);
4861 	lpfc_sli_release_iocbq(phba, cmdiocbq);
4862 	kfree(bmp);
4863 	kfree(dd_data);
4864 	/* make error code available to userspace */
4865 	job->reply->result = rc;
4866 	/* complete the job back to userspace */
4867 	job->job_done(job);
4868 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4869 	return;
4870 }
4871 
4872 /**
4873  * lpfc_menlo_cmd - send an ioctl for menlo hardware
4874  * @job: fc_bsg_job to handle
4875  *
4876  * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
4877  * all the command completions will return the xri for the command.
4878  * For menlo data requests a gen request 64 CX is used to continue the exchange
4879  * supplied in the menlo request header xri field.
4880  **/
4881 static int
4882 lpfc_menlo_cmd(struct fc_bsg_job *job)
4883 {
4884 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4885 	struct lpfc_hba *phba = vport->phba;
4886 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4887 	IOCB_t *cmd, *rsp;
4888 	int rc = 0;
4889 	struct menlo_command *menlo_cmd;
4890 	struct menlo_response *menlo_resp;
4891 	struct lpfc_dmabuf *bmp = NULL;
4892 	int request_nseg;
4893 	int reply_nseg;
4894 	struct scatterlist *sgel = NULL;
4895 	int numbde;
4896 	dma_addr_t busaddr;
4897 	struct bsg_job_data *dd_data;
4898 	struct ulp_bde64 *bpl = NULL;
4899 
4900 	/* in case no data is returned return just the return code */
4901 	job->reply->reply_payload_rcv_len = 0;
4902 
4903 	if (job->request_len <
4904 	    sizeof(struct fc_bsg_request) +
4905 		sizeof(struct menlo_command)) {
4906 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4907 				"2784 Received MENLO_CMD request below "
4908 				"minimum size\n");
4909 		rc = -ERANGE;
4910 		goto no_dd_data;
4911 	}
4912 
4913 	if (job->reply_len <
4914 	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4915 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4916 				"2785 Received MENLO_CMD reply below "
4917 				"minimum size\n");
4918 		rc = -ERANGE;
4919 		goto no_dd_data;
4920 	}
4921 
4922 	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4923 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4924 				"2786 Adapter does not support menlo "
4925 				"commands\n");
4926 		rc = -EPERM;
4927 		goto no_dd_data;
4928 	}
4929 
4930 	menlo_cmd = (struct menlo_command *)
4931 		job->request->rqst_data.h_vendor.vendor_cmd;
4932 
4933 	menlo_resp = (struct menlo_response *)
4934 		job->reply->reply_data.vendor_reply.vendor_rsp;
4935 
4936 	/* allocate our bsg tracking structure */
4937 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4938 	if (!dd_data) {
4939 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4940 				"2787 Failed allocation of dd_data\n");
4941 		rc = -ENOMEM;
4942 		goto no_dd_data;
4943 	}
4944 
4945 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4946 	if (!bmp) {
4947 		rc = -ENOMEM;
4948 		goto free_dd;
4949 	}
4950 
4951 	cmdiocbq = lpfc_sli_get_iocbq(phba);
4952 	if (!cmdiocbq) {
4953 		rc = -ENOMEM;
4954 		goto free_bmp;
4955 	}
4956 
4957 	rspiocbq = lpfc_sli_get_iocbq(phba);
4958 	if (!rspiocbq) {
4959 		rc = -ENOMEM;
4960 		goto free_cmdiocbq;
4961 	}
4962 
4963 	rsp = &rspiocbq->iocb;
4964 
4965 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4966 	if (!bmp->virt) {
4967 		rc = -ENOMEM;
4968 		goto free_rspiocbq;
4969 	}
4970 
4971 	INIT_LIST_HEAD(&bmp->list);
4972 	bpl = (struct ulp_bde64 *) bmp->virt;
4973 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4974 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
4975 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4976 		busaddr = sg_dma_address(sgel);
4977 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4978 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
4979 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
4980 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4981 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4982 		bpl++;
4983 	}
4984 
4985 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4986 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4987 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4988 		busaddr = sg_dma_address(sgel);
4989 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4990 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
4991 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
4992 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4993 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4994 		bpl++;
4995 	}
4996 
4997 	cmd = &cmdiocbq->iocb;
4998 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4999 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5000 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5001 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5002 	cmd->un.genreq64.bdl.bdeSize =
5003 	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5004 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5005 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5006 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5007 	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5008 	cmd->ulpBdeCount = 1;
5009 	cmd->ulpClass = CLASS3;
5010 	cmd->ulpOwner = OWN_CHIP;
5011 	cmd->ulpLe = 1; /* LE bit: last entry */
5012 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5013 	cmdiocbq->vport = phba->pport;
5014 	/* We want the firmware to timeout before we do */
5015 	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5016 	cmdiocbq->context3 = bmp;
5017 	cmdiocbq->context2 = rspiocbq;
5018 	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5019 	cmdiocbq->context1 = dd_data;
5021 	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5022 		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5023 		cmd->ulpPU = MENLO_PU; /* 3 */
5024 		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5025 		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5026 	} else {
5027 		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5028 		cmd->ulpPU = 1;
5029 		cmd->un.ulpWord[4] = 0;
5030 		cmd->ulpContext = menlo_cmd->xri;
5031 	}
5032 
5033 	dd_data->type = TYPE_MENLO;
5034 	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5035 	dd_data->context_un.menlo.rspiocbq = rspiocbq;
5036 	dd_data->context_un.menlo.set_job = job;
5037 	dd_data->context_un.menlo.bmp = bmp;
5038 
5039 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5040 		MENLO_TIMEOUT - 5);
5041 	if (rc == IOCB_SUCCESS)
5042 		return 0; /* done for now */
5043 
5044 	/* iocb failed so cleanup */
5045 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5046 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
5047 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5048 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5049 
5050 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5051 
5052 free_rspiocbq:
5053 	lpfc_sli_release_iocbq(phba, rspiocbq);
5054 free_cmdiocbq:
5055 	lpfc_sli_release_iocbq(phba, cmdiocbq);
5056 free_bmp:
5057 	kfree(bmp);
5058 free_dd:
5059 	kfree(dd_data);
5060 no_dd_data:
5061 	/* make error code available to userspace */
5062 	job->reply->result = rc;
5063 	job->dd_data = NULL;
5064 	return rc;
5065 }
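
/*
 * The menlo command above builds a single BPL in bmp->virt: the request
 * payload BDEs (BUFF_TYPE_BDE_64) come first, followed by the reply
 * payload BDEs (BUFF_TYPE_BDE_64I), with the GEN_REQUEST64 bdl sized to
 * cover both:
 *
 *	bdeSize = (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
 *
 * The firmware timeout is set 5 seconds shorter than MENLO_TIMEOUT so
 * the adapter gives up before the driver-side timeout fires.
 */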
5066 
5067 /**
5068  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5069  * @job: fc_bsg_job to handle
5070  **/
5071 static int
5072 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5073 {
5074 	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
5075 	int rc;
5076 
5077 	switch (command) {
5078 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5079 		rc = lpfc_bsg_hba_set_event(job);
5080 		break;
5081 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5082 		rc = lpfc_bsg_hba_get_event(job);
5083 		break;
5084 	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5085 		rc = lpfc_bsg_send_mgmt_rsp(job);
5086 		break;
5087 	case LPFC_BSG_VENDOR_DIAG_MODE:
5088 		rc = lpfc_bsg_diag_loopback_mode(job);
5089 		break;
5090 	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5091 		rc = lpfc_sli4_bsg_diag_mode_end(job);
5092 		break;
5093 	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5094 		rc = lpfc_bsg_diag_loopback_run(job);
5095 		break;
5096 	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5097 		rc = lpfc_sli4_bsg_link_diag_test(job);
5098 		break;
5099 	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5100 		rc = lpfc_bsg_get_dfc_rev(job);
5101 		break;
5102 	case LPFC_BSG_VENDOR_MBOX:
5103 		rc = lpfc_bsg_mbox_cmd(job);
5104 		break;
5105 	case LPFC_BSG_VENDOR_MENLO_CMD:
5106 	case LPFC_BSG_VENDOR_MENLO_DATA:
5107 		rc = lpfc_menlo_cmd(job);
5108 		break;
5109 	default:
5110 		rc = -EINVAL;
5111 		job->reply->reply_payload_rcv_len = 0;
5112 		/* make error code available to userspace */
5113 		job->reply->result = rc;
5114 		break;
5115 	}
5116 
5117 	return rc;
5118 }
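
/*
 * For reference, a minimal userspace sketch of reaching this dispatcher
 * through the FC transport's bsg node.  The node name and the reply-buffer
 * sizing are illustrative assumptions, and the application must carry the
 * vendor command definitions from lpfc_bsg.h; none of this is part of the
 * driver itself:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		  // SG_IO
 *	#include <linux/bsg.h>		  // struct sg_io_v4
 *	#include <scsi/scsi_bsg_fc.h>	  // struct fc_bsg_request/_reply
 *
 *	unsigned char rq[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
 *	unsigned char rp[sizeof(struct fc_bsg_reply) + 64]; // vendor reply room
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)rq;
 *	struct sg_io_v4 io;
 *	int fd;
 *
 *	memset(rq, 0, sizeof(rq));
 *	memset(&io, 0, sizeof(io));
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)rq;
 *	io.request_len = sizeof(rq);
 *	io.response = (uintptr_t)rp;
 *	io.max_response_len = sizeof(rp);
 *
 *	fd = open("/dev/bsg/fc_host0", O_RDWR);	// host number varies
 *	ioctl(fd, SG_IO, &io);	// fc_bsg_reply.result carries the rc set above
 */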
5119 
5120 /**
5121  * lpfc_bsg_request - handle a bsg request from the FC transport
5122  * @job: fc_bsg_job to handle
5123  **/
5124 int
5125 lpfc_bsg_request(struct fc_bsg_job *job)
5126 {
5127 	uint32_t msgcode;
5128 	int rc;
5129 
5130 	msgcode = job->request->msgcode;
5131 	switch (msgcode) {
5132 	case FC_BSG_HST_VENDOR:
5133 		rc = lpfc_bsg_hst_vendor(job);
5134 		break;
5135 	case FC_BSG_RPT_ELS:
5136 		rc = lpfc_bsg_rport_els(job);
5137 		break;
5138 	case FC_BSG_RPT_CT:
5139 		rc = lpfc_bsg_send_mgmt_cmd(job);
5140 		break;
5141 	default:
5142 		rc = -EINVAL;
5143 		job->reply->reply_payload_rcv_len = 0;
5144 		/* make error code available to userspace */
5145 		job->reply->result = rc;
5146 		break;
5147 	}
5148 
5149 	return rc;
5150 }
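
/*
 * Note: the FC transport reaches lpfc_bsg_request() above and
 * lpfc_bsg_timeout() below through the .bsg_request and .bsg_timeout
 * members of the driver's fc_function_template (set up in lpfc_attr.c),
 * so all bsg traffic for an lpfc host funnels through these two hooks.
 */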
5151 
5152 /**
5153  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5154  * @job: fc_bsg_job that has timed out
5155  *
5156  * Abort the job's outstanding IOCB so that its completion handler returns
5157  * the error to userspace; event and mailbox jobs are completed here directly.
5158  **/
5159 int
5160 lpfc_bsg_timeout(struct fc_bsg_job *job)
5161 {
5162 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5163 	struct lpfc_hba *phba = vport->phba;
5164 	struct lpfc_iocbq *cmdiocb;
5165 	struct lpfc_bsg_event *evt;
5166 	struct lpfc_bsg_iocb *iocb;
5167 	struct lpfc_bsg_mbox *mbox;
5168 	struct lpfc_bsg_menlo *menlo;
5169 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5170 	struct bsg_job_data *dd_data;
5171 	unsigned long flags;
5172 
5173 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5174 	dd_data = (struct bsg_job_data *)job->dd_data;
5175 	/* timeout and completion crossed paths if no dd_data */
5176 	if (!dd_data) {
5177 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5178 		return 0;
5179 	}
5180 
5181 	switch (dd_data->type) {
5182 	case TYPE_IOCB:
5183 		iocb = &dd_data->context_un.iocb;
5184 		cmdiocb = iocb->cmdiocbq;
5185 		/* hint to completion handler that the job timed out */
5186 		job->reply->result = -EAGAIN;
5187 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5188 		/* this will call our completion handler */
5189 		spin_lock_irq(&phba->hbalock);
5190 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5191 		spin_unlock_irq(&phba->hbalock);
5192 		break;
5193 	case TYPE_EVT:
5194 		evt = dd_data->context_un.evt;
5195 		/* this event has no job anymore */
5196 		evt->set_job = NULL;
5197 		job->dd_data = NULL;
5198 		job->reply->reply_payload_rcv_len = 0;
5199 		/* Return -EAGAIN which is our way of signalling the
5200 		 * app to retry.
5201 		 */
5202 		job->reply->result = -EAGAIN;
5203 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5204 		job->job_done(job);
5205 		break;
5206 	case TYPE_MBOX:
5207 		mbox = &dd_data->context_un.mbox;
5208 		/* this mbox has no job anymore */
5209 		mbox->set_job = NULL;
5210 		job->dd_data = NULL;
5211 		job->reply->reply_payload_rcv_len = 0;
5212 		job->reply->result = -EAGAIN;
5213 		/* the mbox completion handler can now be run */
5214 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5215 		job->job_done(job);
5216 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5217 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5218 		break;
5219 	case TYPE_MENLO:
5220 		menlo = &dd_data->context_un.menlo;
5221 		cmdiocb = menlo->cmdiocbq;
5222 		/* hint to completion handler that the job timed out */
5223 		job->reply->result = -EAGAIN;
5224 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5225 		/* this will call our completion handler */
5226 		spin_lock_irq(&phba->hbalock);
5227 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5228 		spin_unlock_irq(&phba->hbalock);
5229 		break;
5230 	default:
5231 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5232 		break;
5233 	}
5234 
5235 	/* The FC transport's fc_bsg_job_timeout() expects a zero return code;
5236 	 * otherwise it logs an error message on the console, so always
5237 	 * return success (zero).
5238 	 */
5239 	return 0;
5240 }
5241