xref: /linux/drivers/scsi/lpfc/lpfc_nportdisc.c (revision fbc872c38c8fed31948c85683b5326ee5ab9fccc)
1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 		 struct lpfc_name *nn, struct lpfc_name *pn)
50 {
51 	/* First, we MUST have a RPI registered */
52 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53 		return 0;
54 
55 	/* Verify that the WWNN / WWPN in the ADISC rsp matches our internal
56 	 * node table entry for that node.
57 	 */
58 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59 		return 0;
60 
61 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62 		return 0;
63 
64 	/* we match, return success */
65 	return 1;
66 }
67 
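/* Validate a remote port's service parameters against our own.  For each
 * valid class of service, a receive data field size larger than ours is
 * clamped to our value (for PLOGI only, not FLOGI).  The routine also
 * preserves the remote buffer-to-buffer state change number bits, caches
 * the remote WWNN / WWPN in the node, and returns 1 if the parameters are
 * acceptable or 0 if they are not.
 */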
68 int
69 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70 		 struct serv_parm *sp, uint32_t class, int flogi)
71 {
72 	volatile struct serv_parm *hsp = &vport->fc_sparam;
73 	uint16_t hsp_value, ssp_value = 0;
74 
75 	/*
76 	 * The receive data field size and buffer-to-buffer receive data field
77 	 * size entries are 16 bits but are represented as two 8-bit fields in
78 	 * the driver data structure to account for rsvd bits and other control
79 	 * bits.  Reconstruct and compare the fields as 16-bit values before
80 	 * correcting the byte values.
81 	 */
82 	if (sp->cls1.classValid) {
83 		if (!flogi) {
84 			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85 				     hsp->cls1.rcvDataSizeLsb);
86 			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87 				     sp->cls1.rcvDataSizeLsb);
88 			if (!ssp_value)
89 				goto bad_service_param;
90 			if (ssp_value > hsp_value) {
91 				sp->cls1.rcvDataSizeLsb =
92 					hsp->cls1.rcvDataSizeLsb;
93 				sp->cls1.rcvDataSizeMsb =
94 					hsp->cls1.rcvDataSizeMsb;
95 			}
96 		}
97 	} else if (class == CLASS1)
98 		goto bad_service_param;
99 	if (sp->cls2.classValid) {
100 		if (!flogi) {
101 			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102 				     hsp->cls2.rcvDataSizeLsb);
103 			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104 				     sp->cls2.rcvDataSizeLsb);
105 			if (!ssp_value)
106 				goto bad_service_param;
107 			if (ssp_value > hsp_value) {
108 				sp->cls2.rcvDataSizeLsb =
109 					hsp->cls2.rcvDataSizeLsb;
110 				sp->cls2.rcvDataSizeMsb =
111 					hsp->cls2.rcvDataSizeMsb;
112 			}
113 		}
114 	} else if (class == CLASS2)
115 		goto bad_service_param;
116 	if (sp->cls3.classValid) {
117 		if (!flogi) {
118 			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119 				     hsp->cls3.rcvDataSizeLsb);
120 			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121 				     sp->cls3.rcvDataSizeLsb);
122 			if (!ssp_value)
123 				goto bad_service_param;
124 			if (ssp_value > hsp_value) {
125 				sp->cls3.rcvDataSizeLsb =
126 					hsp->cls3.rcvDataSizeLsb;
127 				sp->cls3.rcvDataSizeMsb =
128 					hsp->cls3.rcvDataSizeMsb;
129 			}
130 		}
131 	} else if (class == CLASS3)
132 		goto bad_service_param;
133 
134 	/*
135 	 * Preserve the upper four bits of the MSB from the PLOGI response.
136 	 * These bits contain the Buffer-to-Buffer State Change Number
137 	 * from the target and need to be passed to the FW.
138 	 */
139 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141 	if (ssp_value > hsp_value) {
142 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145 	}
146 
147 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149 	return 1;
150 bad_service_param:
151 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152 			 "0207 Device %x "
153 			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154 			 "invalid service parameters.  Ignoring device.\n",
155 			 ndlp->nlp_DID,
156 			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157 			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158 			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159 			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160 	return 0;
161 }
162 
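/* Return a pointer to the ELS payload (just past the command word) of the
 * first response buffer for a completed ELS command.  If lpfc_els_abort
 * already cleared context2, force a LOCAL_REJECT / SLI_ABORTED status and
 * return NULL.
 */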
163 static void *
164 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165 			struct lpfc_iocbq *rspiocb)
166 {
167 	struct lpfc_dmabuf *pcmd, *prsp;
168 	uint32_t *lp;
169 	void     *ptr = NULL;
170 	IOCB_t   *irsp;
171 
172 	irsp = &rspiocb->iocb;
173 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174 
175 	/* For lpfc_els_abort, context2 could be zero'ed to delay
176 	 * freeing associated memory till after ABTS completes.
177 	 */
178 	if (pcmd) {
179 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180 				       list);
181 		if (prsp) {
182 			lp = (uint32_t *) prsp->virt;
183 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184 		}
185 	} else {
186 		/* Force ulpStatus error since we are returning NULL ptr */
187 		if (!(irsp->ulpStatus)) {
188 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190 		}
191 		ptr = NULL;
192 	}
193 	return ptr;
194 }
195 
196 
197 
198 /*
199  * Free resources / clean up outstanding I/Os
200  * associated with a LPFC_NODELIST entry. This
201  * routine effectively results in a "software abort".
202  */
203 int
204 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205 {
206 	LIST_HEAD(abort_list);
207 	struct lpfc_sli  *psli = &phba->sli;
208 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
209 	struct lpfc_iocbq *iocb, *next_iocb;
210 
211 	/* Abort outstanding I/O on NPort <nlp_DID> */
212 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
213 			 "2819 Abort outstanding I/O on NPort x%x "
214 			 "Data: x%x x%x x%x\n",
215 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
216 			 ndlp->nlp_rpi);
217 	/* Clean up all fabric IOs first.*/
218 	/* Clean up all fabric IOs first. */
219 
220 	/*
221 	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
222 	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
223 	 * txcmplq so that the abort operation completes them successfully.
224 	 */
225 	spin_lock_irq(&phba->hbalock);
226 	if (phba->sli_rev == LPFC_SLI_REV4)
227 		spin_lock(&pring->ring_lock);
228 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
229 		/* Add to abort_list on NDLP match. */
230 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
231 			list_add_tail(&iocb->dlist, &abort_list);
232 	}
233 	if (phba->sli_rev == LPFC_SLI_REV4)
234 		spin_unlock(&pring->ring_lock);
235 	spin_unlock_irq(&phba->hbalock);
236 
237 	/* Abort the targeted IOs and remove them from the abort list. */
238 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
239 		spin_lock_irq(&phba->hbalock);
240 		list_del_init(&iocb->dlist);
241 		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
242 		spin_unlock_irq(&phba->hbalock);
243 	}
244 
245 	INIT_LIST_HEAD(&abort_list);
246 
247 	/* Now process the txq */
248 	spin_lock_irq(&phba->hbalock);
249 	if (phba->sli_rev == LPFC_SLI_REV4)
250 		spin_lock(&pring->ring_lock);
251 
252 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
253 		/* Check to see if iocb matches the nport we are looking for */
254 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
255 			list_del_init(&iocb->list);
256 			list_add_tail(&iocb->list, &abort_list);
257 		}
258 	}
259 
260 	if (phba->sli_rev == LPFC_SLI_REV4)
261 		spin_unlock(&pring->ring_lock);
262 	spin_unlock_irq(&phba->hbalock);
263 
264 	/* Cancel all the IOCBs from the completions list */
265 	lpfc_sli_cancel_iocbs(phba, &abort_list,
266 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
267 
268 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
269 	return 0;
270 }
271 
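/* Handle a PLOGI received from a remote NPort.  The WWPN / WWNN and service
 * parameters are validated (an LS_RJT is sent if they are not acceptable),
 * an implicit logout is performed if the node is already logged in, and for
 * pt2pt topology the PLOGI also assigns our NPortId and refreshes the
 * timeout values.  On success a REG_LOGIN mailbox is prepared and handed to
 * the ACC (or to the LS_RJT used for restricted NPIV logins) so registration
 * completes after the response goes out.  Returns 0 when the PLOGI is
 * rejected for invalid parameters or a resource failure, 1 otherwise.
 */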
272 static int
273 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
274 	       struct lpfc_iocbq *cmdiocb)
275 {
276 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
277 	struct lpfc_hba    *phba = vport->phba;
278 	struct lpfc_dmabuf *pcmd;
279 	uint64_t nlp_portwwn = 0;
280 	uint32_t *lp;
281 	IOCB_t *icmd;
282 	struct serv_parm *sp;
283 	uint32_t ed_tov;
284 	LPFC_MBOXQ_t *mbox;
285 	struct ls_rjt stat;
286 	int rc;
287 
288 	memset(&stat, 0, sizeof (struct ls_rjt));
289 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
290 	lp = (uint32_t *) pcmd->virt;
291 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
292 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
293 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
294 				 "0140 PLOGI Reject: invalid pname\n");
295 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
296 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
297 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
298 			NULL);
299 		return 0;
300 	}
301 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
302 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
303 				 "0141 PLOGI Reject: invalid nname\n");
304 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
305 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
306 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
307 			NULL);
308 		return 0;
309 	}
310 
311 	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
312 	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
313 		/* Reject this request because of invalid parameters */
314 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
315 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
316 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
317 			NULL);
318 		return 0;
319 	}
320 	icmd = &cmdiocb->iocb;
321 
322 	/* PLOGI chkparm OK */
323 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
324 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
325 			 "x%x x%x x%x\n",
326 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
327 			 ndlp->nlp_rpi, vport->port_state,
328 			 vport->fc_flag);
329 
330 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
331 		ndlp->nlp_fcp_info |= CLASS2;
332 	else
333 		ndlp->nlp_fcp_info |= CLASS3;
334 
335 	ndlp->nlp_class_sup = 0;
336 	if (sp->cls1.classValid)
337 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
338 	if (sp->cls2.classValid)
339 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
340 	if (sp->cls3.classValid)
341 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
342 	if (sp->cls4.classValid)
343 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
344 	ndlp->nlp_maxframe =
345 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
346 
347 	/* if already logged in, do implicit logout */
348 	switch (ndlp->nlp_state) {
349 	case  NLP_STE_NPR_NODE:
350 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
351 			break;
352 	case  NLP_STE_REG_LOGIN_ISSUE:
353 	case  NLP_STE_PRLI_ISSUE:
354 	case  NLP_STE_UNMAPPED_NODE:
355 	case  NLP_STE_MAPPED_NODE:
356 		/* lpfc_plogi_confirm_nport skips fabric did, handle it here */
357 		if (!(ndlp->nlp_type & NLP_FABRIC)) {
358 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
359 					 ndlp, NULL);
360 			return 1;
361 		}
362 		if (nlp_portwwn != 0 &&
363 		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
364 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
365 					 "0143 PLOGI recv'd from DID: x%x "
366 					 "WWPN changed: old %llx new %llx\n",
367 					 ndlp->nlp_DID,
368 					 (unsigned long long)nlp_portwwn,
369 					 (unsigned long long)
370 					 wwn_to_u64(sp->portName.u.wwn));
371 
372 		ndlp->nlp_prev_state = ndlp->nlp_state;
373 		/* rport needs to be unregistered first */
374 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
375 		break;
376 	}
377 
378 	/* Check for Nport to NPort pt2pt protocol */
379 	if ((vport->fc_flag & FC_PT2PT) &&
380 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
381 		/* rcv'ed PLOGI decides what our NPortId will be */
382 		vport->fc_myDID = icmd->un.rcvels.parmRo;
383 
384 		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
385 		if (sp->cmn.edtovResolution) {
386 			/* E_D_TOV ticks are in nanoseconds */
387 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
388 		}
389 
390 		/*
391 		 * For pt-to-pt, use the larger EDTOV
392 		 * RATOV = 2 * EDTOV
393 		 */
394 		if (ed_tov > phba->fc_edtov)
395 			phba->fc_edtov = ed_tov;
396 		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
397 
398 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
399 
400 		/* Issue config_link / reg_vfi to account for updated TOV's */
401 
402 		if (phba->sli_rev == LPFC_SLI_REV4)
403 			lpfc_issue_reg_vfi(vport);
404 		else {
405 			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
406 			if (mbox == NULL)
407 				goto out;
408 			lpfc_config_link(phba, mbox);
409 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
410 			mbox->vport = vport;
411 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
412 			if (rc == MBX_NOT_FINISHED) {
413 				mempool_free(mbox, phba->mbox_mem_pool);
414 				goto out;
415 			}
416 		}
417 
418 		lpfc_can_disctmo(vport);
419 	}
420 
421 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
422 	if (!mbox)
423 		goto out;
424 
425 	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
426 	if (phba->sli_rev == LPFC_SLI_REV4)
427 		lpfc_unreg_rpi(vport, ndlp);
428 
429 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
430 			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
431 	if (rc) {
432 		mempool_free(mbox, phba->mbox_mem_pool);
433 		goto out;
434 	}
435 
436 	/* ACC PLOGI rsp command needs to execute first,
437 	 * queue this mbox command to be processed later.
438 	 */
439 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
440 	/*
441 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
442 	 * command issued in lpfc_cmpl_els_acc().
443 	 */
444 	mbox->vport = vport;
445 	spin_lock_irq(shost->host_lock);
446 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
447 	spin_unlock_irq(shost->host_lock);
448 
449 	/*
450 	 * If there is an outstanding PLOGI issued, abort it before
451 	 * sending ACC rsp for received PLOGI. If pending plogi
452 	 * is not canceled here, the plogi will be rejected by
453 	 * remote port and will be retried. On a configuration with
454 	 * single discovery thread, this will cause a huge delay in
455 	 * discovery. Also this will cause multiple state machines
456 	 * running in parallel for this node.
457 	 */
458 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
459 		/* software abort outstanding PLOGI */
460 		lpfc_els_abort(phba, ndlp);
461 	}
462 
463 	if ((vport->port_type == LPFC_NPIV_PORT &&
464 	     vport->cfg_restrict_login)) {
465 
466 		/* In order to preserve RPIs, we want to cleanup
467 		 * the default RPI the firmware created to rcv
468 		 * this ELS request. The only way to do this is
469 		 * to register, then unregister the RPI.
470 		 */
471 		spin_lock_irq(shost->host_lock);
472 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
473 		spin_unlock_irq(shost->host_lock);
474 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
475 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
476 		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
477 			ndlp, mbox);
478 		if (rc)
479 			mempool_free(mbox, phba->mbox_mem_pool);
480 		return 1;
481 	}
482 	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
483 	if (rc)
484 		mempool_free(mbox, phba->mbox_mem_pool);
485 	return 1;
486 out:
487 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
488 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
489 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
490 	return 0;
491 }
492 
493 /**
494  * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
495  * @phba: pointer to lpfc hba data structure.
496  * @mboxq: pointer to mailbox object
497  *
498  * This routine is invoked to issue a completion to a rcv'ed
499  * ADISC or PDISC after the paused RPI has been resumed.
500  **/
501 static void
502 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
503 {
504 	struct lpfc_vport *vport;
505 	struct lpfc_iocbq *elsiocb;
506 	struct lpfc_nodelist *ndlp;
507 	uint32_t cmd;
508 
509 	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
510 	ndlp = (struct lpfc_nodelist *) mboxq->context2;
511 	vport = mboxq->vport;
512 	cmd = elsiocb->drvrTimeout;
513 
514 	if (cmd == ELS_CMD_ADISC) {
515 		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
516 	} else {
517 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
518 			ndlp, NULL);
519 	}
520 	kfree(elsiocb);
521 	mempool_free(mboxq, phba->mbox_mem_pool);
522 }
523 
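/* Handle a received ADISC or PDISC.  If our RPI is registered and the
 * WWNN / WWPN in the payload match the node, the request is ACC'd (for
 * SLI4 the RPI is resumed first and the ACC is sent from the mailbox
 * completion) and the node moves to MAPPED or UNMAPPED state.  Otherwise
 * an LS_RJT is sent and the node is placed in NPR state with a short
 * delay before retrying PLOGI.
 */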
524 static int
525 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
526 		struct lpfc_iocbq *cmdiocb)
527 {
528 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
529 	struct lpfc_iocbq  *elsiocb;
530 	struct lpfc_dmabuf *pcmd;
531 	struct serv_parm   *sp;
532 	struct lpfc_name   *pnn, *ppn;
533 	struct ls_rjt stat;
534 	ADISC *ap;
535 	IOCB_t *icmd;
536 	uint32_t *lp;
537 	uint32_t cmd;
538 
539 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
540 	lp = (uint32_t *) pcmd->virt;
541 
542 	cmd = *lp++;
543 	if (cmd == ELS_CMD_ADISC) {
544 		ap = (ADISC *) lp;
545 		pnn = (struct lpfc_name *) & ap->nodeName;
546 		ppn = (struct lpfc_name *) & ap->portName;
547 	} else {
548 		sp = (struct serv_parm *) lp;
549 		pnn = (struct lpfc_name *) & sp->nodeName;
550 		ppn = (struct lpfc_name *) & sp->portName;
551 	}
552 
553 	icmd = &cmdiocb->iocb;
554 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
555 
556 		/*
557 		 * As soon as  we send ACC, the remote NPort can
558 		 * start sending us data. Thus, for SLI4 we must
559 		 * resume the RPI before the ACC goes out.
560 		 */
561 		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
562 			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
563 				GFP_KERNEL);
564 			if (elsiocb) {
565 
566 				/* Save info from cmd IOCB used in rsp */
567 				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
568 					sizeof(struct lpfc_iocbq));
569 
570 				/* Save the ELS cmd */
571 				elsiocb->drvrTimeout = cmd;
572 
573 				lpfc_sli4_resume_rpi(ndlp,
574 					lpfc_mbx_cmpl_resume_rpi, elsiocb);
575 				goto out;
576 			}
577 		}
578 
579 		if (cmd == ELS_CMD_ADISC) {
580 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
581 		} else {
582 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
583 				ndlp, NULL);
584 		}
585 out:
586 		/* If we are authenticated, move to the proper state */
587 		if (ndlp->nlp_type & NLP_FCP_TARGET)
588 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
589 		else
590 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
591 
592 		return 1;
593 	}
594 	/* Reject this request because of invalid parameters */
595 	stat.un.b.lsRjtRsvd0 = 0;
596 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
597 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
598 	stat.un.b.vendorUnique = 0;
599 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
600 
601 	/* 1 sec timeout */
602 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
603 
604 	spin_lock_irq(shost->host_lock);
605 	ndlp->nlp_flag |= NLP_DELAY_TMO;
606 	spin_unlock_irq(shost->host_lock);
607 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
608 	ndlp->nlp_prev_state = ndlp->nlp_state;
609 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
610 	return 0;
611 }
612 
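/* Handle a received LOGO (or a PRLO treated like a LOGO).  The request is
 * ACC'd first; a LOGO from the fabric (Fabric_DID) takes the link down and
 * either re-instantiates the VLink via FDISC or restarts physical port
 * discovery, while a LOGO from an FCP target schedules a delayed PLOGI
 * retry.  In all cases the node ends up in NPR state.
 */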
613 static int
614 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
615 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
616 {
617 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
618 	struct lpfc_hba    *phba = vport->phba;
619 	struct lpfc_vport **vports;
620 	int i, active_vlink_present = 0;
621 
622 	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
623 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
624 	 * PLOGIs during LOGO storms from a device.
625 	 */
626 	spin_lock_irq(shost->host_lock);
627 	ndlp->nlp_flag |= NLP_LOGO_ACC;
628 	spin_unlock_irq(shost->host_lock);
629 	if (els_cmd == ELS_CMD_PRLO)
630 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
631 	else
632 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
633 	if (ndlp->nlp_DID == Fabric_DID) {
634 		if (vport->port_state <= LPFC_FDISC)
635 			goto out;
636 		lpfc_linkdown_port(vport);
637 		spin_lock_irq(shost->host_lock);
638 		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
639 		spin_unlock_irq(shost->host_lock);
640 		vports = lpfc_create_vport_work_array(phba);
641 		if (vports) {
642 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
643 					i++) {
644 				if ((!(vports[i]->fc_flag &
645 					FC_VPORT_LOGO_RCVD)) &&
646 					(vports[i]->port_state > LPFC_FDISC)) {
647 					active_vlink_present = 1;
648 					break;
649 				}
650 			}
651 			lpfc_destroy_vport_work_array(phba, vports);
652 		}
653 
654 		/*
655 		 * Don't re-instantiate if vport is marked for deletion.
656 		 * If we are here first then vport_delete is going to wait
657 		 * for discovery to complete.
658 		 */
659 		if (!(vport->load_flag & FC_UNLOADING) &&
660 					active_vlink_present) {
661 			/*
662 			 * If there are other active VLinks present,
663 			 * re-instantiate the Vlink using FDISC.
664 			 */
665 			mod_timer(&ndlp->nlp_delayfunc,
666 				  jiffies + msecs_to_jiffies(1000));
667 			spin_lock_irq(shost->host_lock);
668 			ndlp->nlp_flag |= NLP_DELAY_TMO;
669 			spin_unlock_irq(shost->host_lock);
670 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
671 			vport->port_state = LPFC_FDISC;
672 		} else {
673 			spin_lock_irq(shost->host_lock);
674 			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
675 			spin_unlock_irq(shost->host_lock);
676 			lpfc_retry_pport_discovery(phba);
677 		}
678 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
679 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
680 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
681 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
682 		/* Only try to re-login if this is NOT a Fabric Node */
683 		mod_timer(&ndlp->nlp_delayfunc,
684 			  jiffies + msecs_to_jiffies(1000 * 1));
685 		spin_lock_irq(shost->host_lock);
686 		ndlp->nlp_flag |= NLP_DELAY_TMO;
687 		spin_unlock_irq(shost->host_lock);
688 
689 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
690 	}
691 out:
692 	ndlp->nlp_prev_state = ndlp->nlp_state;
693 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
694 
695 	spin_lock_irq(shost->host_lock);
696 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
697 	spin_unlock_irq(shost->host_lock);
698 	/* The driver has to wait until the ACC completes before it continues
699 	 * processing the LOGO.  The action will resume in
700 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
701 	 * unreg_login, the driver waits so the ACC does not get aborted.
702 	 */
703 	return 0;
704 }
705 
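/* Parse the FCP service parameter page of a received PRLI and update the
 * node's initiator/target type, first-burst and FCP-2 (retry) flags, then
 * propagate any role change to the SCSI transport rport.
 */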
706 static void
707 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
708 	      struct lpfc_iocbq *cmdiocb)
709 {
710 	struct lpfc_dmabuf *pcmd;
711 	uint32_t *lp;
712 	PRLI *npr;
713 	struct fc_rport *rport = ndlp->rport;
714 	u32 roles;
715 
716 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
717 	lp = (uint32_t *) pcmd->virt;
718 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
719 
720 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
721 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
722 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
723 	if (npr->prliType == PRLI_FCP_TYPE) {
724 		if (npr->initiatorFunc)
725 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
726 		if (npr->targetFunc) {
727 			ndlp->nlp_type |= NLP_FCP_TARGET;
728 			if (npr->writeXferRdyDis)
729 				ndlp->nlp_flag |= NLP_FIRSTBURST;
730 		}
731 		if (npr->Retry)
732 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
733 	}
734 	if (rport) {
735 		/* We need to update the rport role values */
736 		roles = FC_RPORT_ROLE_UNKNOWN;
737 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
738 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
739 		if (ndlp->nlp_type & NLP_FCP_TARGET)
740 			roles |= FC_RPORT_ROLE_FCP_TARGET;
741 
742 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
743 			"rport rolechg:   role:x%x did:x%x flg:x%x",
744 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
745 
746 		fc_remote_port_rolechg(rport, roles);
747 	}
748 }
749 
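/* Decide whether this node should be re-authenticated with ADISC on the
 * next discovery cycle.  Returns 1 (and sets NLP_NPR_ADISC) when the RPI is
 * registered, the port is not in pt2pt mode, and either cfg_use_adisc is
 * set during an RSCN or the node is an FCP-2 target.  Otherwise
 * NLP_NPR_ADISC is cleared, the RPI is unregistered if it was registered,
 * and 0 is returned.
 */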
750 static uint32_t
751 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
752 {
753 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
754 
755 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
756 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
757 		return 0;
758 	}
759 
760 	if (!(vport->fc_flag & FC_PT2PT)) {
761 		/* Check config parameter use-adisc or FCP-2 */
762 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
763 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
764 		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
765 			spin_lock_irq(shost->host_lock);
766 			ndlp->nlp_flag |= NLP_NPR_ADISC;
767 			spin_unlock_irq(shost->host_lock);
768 			return 1;
769 		}
770 	}
771 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
772 	lpfc_unreg_rpi(vport, ndlp);
773 	return 0;
774 }
775 
776 /**
777  * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
778  * @phba : Pointer to lpfc_hba structure.
779  * @vport: Pointer to lpfc_vport structure.
780  * @rpi  : rpi to be released.
781  *
782  * This function will send an unreg_login mailbox command to the firmware
783  * to release an rpi.
784  **/
785 void
786 lpfc_release_rpi(struct lpfc_hba *phba,
787 		struct lpfc_vport *vport,
788 		uint16_t rpi)
789 {
790 	LPFC_MBOXQ_t *pmb;
791 	int rc;
792 
793 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
794 			GFP_KERNEL);
795 	if (!pmb)
796 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
797 			"2796 mailbox memory allocation failed\n");
798 	else {
799 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
800 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
801 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
802 		if (rc == MBX_NOT_FINISHED)
803 			mempool_free(pmb, phba->mbox_mem_pool);
804 	}
805 }
806 
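/* Catch-all handler for state/event combinations that should never occur.
 * If the offending event is a REG_LOGIN completion, the freshly registered
 * RPI is released again; the illegal transition is then logged and the
 * current node state is left unchanged.
 */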
807 static uint32_t
808 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
809 		  void *arg, uint32_t evt)
810 {
811 	struct lpfc_hba *phba;
812 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
813 	uint16_t rpi;
814 
815 	phba = vport->phba;
816 	/* Release the RPI if reglogin completing */
817 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
818 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
819 		(!pmb->u.mb.mbxStatus)) {
820 		rpi = pmb->u.mb.un.varWords[0];
821 		lpfc_release_rpi(phba, vport, rpi);
822 	}
823 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
824 			 "0271 Illegal State Transition: node x%x "
825 			 "event x%x, state x%x Data: x%x x%x\n",
826 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
827 			 ndlp->nlp_flag);
828 	return ndlp->nlp_state;
829 }
830 
831 static uint32_t
832 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
833 		  void *arg, uint32_t evt)
834 {
835 	/* This transition is only legal if we previously
836 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
837 	 * working on the same NPortID, do nothing for this thread
838 	 * so that it stops here.
839 	 */
840 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
841 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
842 			 "0272 Illegal State Transition: node x%x "
843 			 "event x%x, state x%x Data: x%x x%x\n",
844 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
845 			 ndlp->nlp_flag);
846 	}
847 	return ndlp->nlp_state;
848 }
849 
850 /* Start of Discovery State Machine routines */
851 
852 static uint32_t
853 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
854 			   void *arg, uint32_t evt)
855 {
856 	struct lpfc_iocbq *cmdiocb;
857 
858 	cmdiocb = (struct lpfc_iocbq *) arg;
859 
860 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
861 		return ndlp->nlp_state;
862 	}
863 	return NLP_STE_FREED_NODE;
864 }
865 
866 static uint32_t
867 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
868 			 void *arg, uint32_t evt)
869 {
870 	lpfc_issue_els_logo(vport, ndlp, 0);
871 	return ndlp->nlp_state;
872 }
873 
874 static uint32_t
875 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
876 			  void *arg, uint32_t evt)
877 {
878 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
879 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
880 
881 	spin_lock_irq(shost->host_lock);
882 	ndlp->nlp_flag |= NLP_LOGO_ACC;
883 	spin_unlock_irq(shost->host_lock);
884 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
885 
886 	return ndlp->nlp_state;
887 }
888 
889 static uint32_t
890 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
891 			   void *arg, uint32_t evt)
892 {
893 	return NLP_STE_FREED_NODE;
894 }
895 
896 static uint32_t
897 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
898 			   void *arg, uint32_t evt)
899 {
900 	return NLP_STE_FREED_NODE;
901 }
902 
903 static uint32_t
904 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
905 			struct lpfc_nodelist *ndlp,
906 			   void *arg, uint32_t evt)
907 {
908 	return ndlp->nlp_state;
909 }
910 
911 static uint32_t
912 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
913 			   void *arg, uint32_t evt)
914 {
915 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
916 	struct lpfc_hba   *phba = vport->phba;
917 	struct lpfc_iocbq *cmdiocb = arg;
918 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
919 	uint32_t *lp = (uint32_t *) pcmd->virt;
920 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
921 	struct ls_rjt stat;
922 	int port_cmp;
923 
924 	memset(&stat, 0, sizeof (struct ls_rjt));
925 
926 	/* For a PLOGI, we only accept if our portname is less
927 	 * than the remote portname.
928 	 */
929 	phba->fc_stat.elsLogiCol++;
930 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
931 			  sizeof(struct lpfc_name));
932 
933 	if (port_cmp >= 0) {
934 		/* Reject this request because the remote node will accept
935 		 * ours */
936 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
937 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
938 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
939 			NULL);
940 	} else {
941 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
942 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
943 		    (vport->num_disc_nodes)) {
944 			spin_lock_irq(shost->host_lock);
945 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
946 			spin_unlock_irq(shost->host_lock);
947 			/* Check if there are more PLOGIs to be sent */
948 			lpfc_more_plogi(vport);
949 			if (vport->num_disc_nodes == 0) {
950 				spin_lock_irq(shost->host_lock);
951 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
952 				spin_unlock_irq(shost->host_lock);
953 				lpfc_can_disctmo(vport);
954 				lpfc_end_rscn(vport);
955 			}
956 		}
957 	} /* If our portname was less */
958 
959 	return ndlp->nlp_state;
960 }
961 
962 static uint32_t
963 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
964 			  void *arg, uint32_t evt)
965 {
966 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
967 	struct ls_rjt     stat;
968 
969 	memset(&stat, 0, sizeof (struct ls_rjt));
970 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
971 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
972 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
973 	return ndlp->nlp_state;
974 }
975 
976 static uint32_t
977 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
978 			  void *arg, uint32_t evt)
979 {
980 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
981 
982 	/* software abort outstanding PLOGI */
983 	lpfc_els_abort(vport->phba, ndlp);
984 
985 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
986 	return ndlp->nlp_state;
987 }
988 
989 static uint32_t
990 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
991 			 void *arg, uint32_t evt)
992 {
993 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
994 	struct lpfc_hba   *phba = vport->phba;
995 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
996 
997 	/* software abort outstanding PLOGI */
998 	lpfc_els_abort(phba, ndlp);
999 
1000 	if (evt == NLP_EVT_RCV_LOGO) {
1001 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1002 	} else {
1003 		lpfc_issue_els_logo(vport, ndlp, 0);
1004 	}
1005 
1006 	/* Put ndlp in NPR state; set plogi timer for 1 sec */
1007 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1008 	spin_lock_irq(shost->host_lock);
1009 	ndlp->nlp_flag |= NLP_DELAY_TMO;
1010 	spin_unlock_irq(shost->host_lock);
1011 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1012 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1013 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1014 
1015 	return ndlp->nlp_state;
1016 }
1017 
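/* Completion handler for a PLOGI we issued.  On success the response
 * service parameters are validated, pt2pt timeout values are refreshed if
 * applicable, and a REG_LOGIN mailbox is issued, moving the node to
 * REG_LOGIN_ISSUE state.  On any failure the node is parked in NPR state
 * with NLP_DEFER_RM set so stale completions are not processed.
 */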
1018 static uint32_t
1019 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1020 			    struct lpfc_nodelist *ndlp,
1021 			    void *arg,
1022 			    uint32_t evt)
1023 {
1024 	struct lpfc_hba    *phba = vport->phba;
1025 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1026 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
1027 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
1028 	uint32_t *lp;
1029 	IOCB_t *irsp;
1030 	struct serv_parm *sp;
1031 	uint32_t ed_tov;
1032 	LPFC_MBOXQ_t *mbox;
1033 	int rc;
1034 
1035 	cmdiocb = (struct lpfc_iocbq *) arg;
1036 	rspiocb = cmdiocb->context_un.rsp_iocb;
1037 
1038 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1039 		/* Recovery from PLOGI collision logic */
1040 		return ndlp->nlp_state;
1041 	}
1042 
1043 	irsp = &rspiocb->iocb;
1044 
1045 	if (irsp->ulpStatus)
1046 		goto out;
1047 
1048 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1049 
1050 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1051 	if (!prsp)
1052 		goto out;
1053 
1054 	lp = (uint32_t *) prsp->virt;
1055 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1056 
1057 	/* Some switches have FDMI servers returning 0 for WWN */
1058 	if ((ndlp->nlp_DID != FDMI_DID) &&
1059 		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1060 		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1061 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1062 				 "0142 PLOGI RSP: Invalid WWN.\n");
1063 		goto out;
1064 	}
1065 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1066 		goto out;
1067 	/* PLOGI chkparm OK */
1068 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1069 			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1070 			 ndlp->nlp_DID, ndlp->nlp_state,
1071 			 ndlp->nlp_flag, ndlp->nlp_rpi);
1072 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1073 		ndlp->nlp_fcp_info |= CLASS2;
1074 	else
1075 		ndlp->nlp_fcp_info |= CLASS3;
1076 
1077 	ndlp->nlp_class_sup = 0;
1078 	if (sp->cls1.classValid)
1079 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1080 	if (sp->cls2.classValid)
1081 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1082 	if (sp->cls3.classValid)
1083 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1084 	if (sp->cls4.classValid)
1085 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1086 	ndlp->nlp_maxframe =
1087 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1088 
1089 	if ((vport->fc_flag & FC_PT2PT) &&
1090 	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
1091 		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
1092 		if (sp->cmn.edtovResolution) {
1093 			/* E_D_TOV ticks are in nanoseconds */
1094 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
1095 		}
1096 
1097 		/*
1098 		 * Use the larger EDTOV
1099 		 * RATOV = 2 * EDTOV for pt-to-pt
1100 		 */
1101 		if (ed_tov > phba->fc_edtov)
1102 			phba->fc_edtov = ed_tov;
1103 		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
1104 
1105 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
1106 
1107 		/* Issue config_link / reg_vfi to account for updated TOV's */
1108 		if (phba->sli_rev == LPFC_SLI_REV4) {
1109 			lpfc_issue_reg_vfi(vport);
1110 		} else {
1111 			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1112 			if (!mbox) {
1113 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1114 						 "0133 PLOGI: no memory "
1115 						 "for config_link "
1116 						 "Data: x%x x%x x%x x%x\n",
1117 						 ndlp->nlp_DID, ndlp->nlp_state,
1118 						 ndlp->nlp_flag, ndlp->nlp_rpi);
1119 				goto out;
1120 			}
1121 
1122 			lpfc_config_link(phba, mbox);
1123 
1124 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1125 			mbox->vport = vport;
1126 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1127 			if (rc == MBX_NOT_FINISHED) {
1128 				mempool_free(mbox, phba->mbox_mem_pool);
1129 				goto out;
1130 			}
1131 		}
1132 	}
1133 
1134 	lpfc_unreg_rpi(vport, ndlp);
1135 
1136 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1137 	if (!mbox) {
1138 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1139 				 "0018 PLOGI: no memory for reg_login "
1140 				 "Data: x%x x%x x%x x%x\n",
1141 				 ndlp->nlp_DID, ndlp->nlp_state,
1142 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1143 		goto out;
1144 	}
1145 
1146 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1147 			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1148 		switch (ndlp->nlp_DID) {
1149 		case NameServer_DID:
1150 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1151 			break;
1152 		case FDMI_DID:
1153 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1154 			break;
1155 		default:
1156 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1157 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1158 		}
1159 		mbox->context2 = lpfc_nlp_get(ndlp);
1160 		mbox->vport = vport;
1161 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1162 		    != MBX_NOT_FINISHED) {
1163 			lpfc_nlp_set_state(vport, ndlp,
1164 					   NLP_STE_REG_LOGIN_ISSUE);
1165 			return ndlp->nlp_state;
1166 		}
1167 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1168 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1169 		/* decrement the node reference count taken for the failed
1170 		 * mbox command
1171 		 */
1172 		lpfc_nlp_put(ndlp);
1173 		mp = (struct lpfc_dmabuf *) mbox->context1;
1174 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1175 		kfree(mp);
1176 		mempool_free(mbox, phba->mbox_mem_pool);
1177 
1178 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1179 				 "0134 PLOGI: cannot issue reg_login "
1180 				 "Data: x%x x%x x%x x%x\n",
1181 				 ndlp->nlp_DID, ndlp->nlp_state,
1182 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1183 	} else {
1184 		mempool_free(mbox, phba->mbox_mem_pool);
1185 
1186 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1187 				 "0135 PLOGI: cannot format reg_login "
1188 				 "Data: x%x x%x x%x x%x\n",
1189 				 ndlp->nlp_DID, ndlp->nlp_state,
1190 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1191 	}
1192 
1193 
1194 out:
1195 	if (ndlp->nlp_DID == NameServer_DID) {
1196 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1197 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1198 				 "0261 Cannot Register NameServer login\n");
1199 	}
1200 
1201 	/*
1202 	 * In case the node reference counter does not go to zero, ensure that
1203 	 * the stale state for the node is not processed.
1204 	 */
1205 
1206 	ndlp->nlp_prev_state = ndlp->nlp_state;
1207 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1208 	spin_lock_irq(shost->host_lock);
1209 	ndlp->nlp_flag |= NLP_DEFER_RM;
1210 	spin_unlock_irq(shost->host_lock);
1211 	return NLP_STE_FREED_NODE;
1212 }
1213 
1214 static uint32_t
1215 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1216 			   void *arg, uint32_t evt)
1217 {
1218 	return ndlp->nlp_state;
1219 }
1220 
1221 static uint32_t
1222 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1223 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1224 {
1225 	struct lpfc_hba *phba;
1226 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1227 	MAILBOX_t *mb = &pmb->u.mb;
1228 	uint16_t rpi;
1229 
1230 	phba = vport->phba;
1231 	/* Release the RPI */
1232 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1233 		!mb->mbxStatus) {
1234 		rpi = pmb->u.mb.un.varWords[0];
1235 		lpfc_release_rpi(phba, vport, rpi);
1236 	}
1237 	return ndlp->nlp_state;
1238 }
1239 
1240 static uint32_t
1241 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1242 			   void *arg, uint32_t evt)
1243 {
1244 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1245 
1246 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1247 		spin_lock_irq(shost->host_lock);
1248 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1249 		spin_unlock_irq(shost->host_lock);
1250 		return ndlp->nlp_state;
1251 	} else {
1252 		/* software abort outstanding PLOGI */
1253 		lpfc_els_abort(vport->phba, ndlp);
1254 
1255 		lpfc_drop_node(vport, ndlp);
1256 		return NLP_STE_FREED_NODE;
1257 	}
1258 }
1259 
1260 static uint32_t
1261 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1262 			      struct lpfc_nodelist *ndlp,
1263 			      void *arg,
1264 			      uint32_t evt)
1265 {
1266 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1267 	struct lpfc_hba  *phba = vport->phba;
1268 
1269 	/* Don't do anything that will mess up processing of the
1270 	 * previous RSCN.
1271 	 */
1272 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1273 		return ndlp->nlp_state;
1274 
1275 	/* software abort outstanding PLOGI */
1276 	lpfc_els_abort(phba, ndlp);
1277 
1278 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1279 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1280 	spin_lock_irq(shost->host_lock);
1281 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1282 	spin_unlock_irq(shost->host_lock);
1283 
1284 	return ndlp->nlp_state;
1285 }
1286 
1287 static uint32_t
1288 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1289 			   void *arg, uint32_t evt)
1290 {
1291 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1292 	struct lpfc_hba   *phba = vport->phba;
1293 	struct lpfc_iocbq *cmdiocb;
1294 
1295 	/* software abort outstanding ADISC */
1296 	lpfc_els_abort(phba, ndlp);
1297 
1298 	cmdiocb = (struct lpfc_iocbq *) arg;
1299 
1300 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1301 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1302 			spin_lock_irq(shost->host_lock);
1303 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1304 			spin_unlock_irq(shost->host_lock);
1305 			if (vport->num_disc_nodes)
1306 				lpfc_more_adisc(vport);
1307 		}
1308 		return ndlp->nlp_state;
1309 	}
1310 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1311 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1312 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1313 
1314 	return ndlp->nlp_state;
1315 }
1316 
1317 static uint32_t
1318 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1319 			  void *arg, uint32_t evt)
1320 {
1321 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1322 
1323 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1324 	return ndlp->nlp_state;
1325 }
1326 
1327 static uint32_t
1328 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1329 			  void *arg, uint32_t evt)
1330 {
1331 	struct lpfc_hba *phba = vport->phba;
1332 	struct lpfc_iocbq *cmdiocb;
1333 
1334 	cmdiocb = (struct lpfc_iocbq *) arg;
1335 
1336 	/* software abort outstanding ADISC */
1337 	lpfc_els_abort(phba, ndlp);
1338 
1339 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1340 	return ndlp->nlp_state;
1341 }
1342 
1343 static uint32_t
1344 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1345 			    struct lpfc_nodelist *ndlp,
1346 			    void *arg, uint32_t evt)
1347 {
1348 	struct lpfc_iocbq *cmdiocb;
1349 
1350 	cmdiocb = (struct lpfc_iocbq *) arg;
1351 
1352 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1353 	return ndlp->nlp_state;
1354 }
1355 
1356 static uint32_t
1357 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1358 			  void *arg, uint32_t evt)
1359 {
1360 	struct lpfc_iocbq *cmdiocb;
1361 
1362 	cmdiocb = (struct lpfc_iocbq *) arg;
1363 
1364 	/* Treat like rcv logo */
1365 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1366 	return ndlp->nlp_state;
1367 }
1368 
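/* Completion handler for an ADISC we issued.  If the response failed or the
 * returned WWNN / WWPN do not match, the cached names are cleared, the RPI
 * is unregistered and a delayed PLOGI retry is scheduled from NPR state.
 * On success (after resuming the RPI on SLI4) the node transitions to
 * MAPPED or UNMAPPED state.
 */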
1369 static uint32_t
1370 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1371 			    struct lpfc_nodelist *ndlp,
1372 			    void *arg, uint32_t evt)
1373 {
1374 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1375 	struct lpfc_hba   *phba = vport->phba;
1376 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1377 	IOCB_t *irsp;
1378 	ADISC *ap;
1379 	int rc;
1380 
1381 	cmdiocb = (struct lpfc_iocbq *) arg;
1382 	rspiocb = cmdiocb->context_un.rsp_iocb;
1383 
1384 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1385 	irsp = &rspiocb->iocb;
1386 
1387 	if ((irsp->ulpStatus) ||
1388 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1389 		/* 1 sec timeout */
1390 		mod_timer(&ndlp->nlp_delayfunc,
1391 			  jiffies + msecs_to_jiffies(1000));
1392 		spin_lock_irq(shost->host_lock);
1393 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1394 		spin_unlock_irq(shost->host_lock);
1395 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1396 
1397 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1398 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1399 
1400 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1401 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1402 		lpfc_unreg_rpi(vport, ndlp);
1403 		return ndlp->nlp_state;
1404 	}
1405 
1406 	if (phba->sli_rev == LPFC_SLI_REV4) {
1407 		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1408 		if (rc) {
1409 			/* Stay in state and retry. */
1410 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1411 			return ndlp->nlp_state;
1412 		}
1413 	}
1414 
1415 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1416 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1417 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1418 	} else {
1419 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1420 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1421 	}
1422 
1423 	return ndlp->nlp_state;
1424 }
1425 
1426 static uint32_t
1427 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1428 			   void *arg, uint32_t evt)
1429 {
1430 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1431 
1432 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1433 		spin_lock_irq(shost->host_lock);
1434 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1435 		spin_unlock_irq(shost->host_lock);
1436 		return ndlp->nlp_state;
1437 	} else {
1438 		/* software abort outstanding ADISC */
1439 		lpfc_els_abort(vport->phba, ndlp);
1440 
1441 		lpfc_drop_node(vport, ndlp);
1442 		return NLP_STE_FREED_NODE;
1443 	}
1444 }
1445 
1446 static uint32_t
1447 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1448 			      struct lpfc_nodelist *ndlp,
1449 			      void *arg,
1450 			      uint32_t evt)
1451 {
1452 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1453 	struct lpfc_hba  *phba = vport->phba;
1454 
1455 	/* Don't do anything that will mess up processing of the
1456 	 * previous RSCN.
1457 	 */
1458 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1459 		return ndlp->nlp_state;
1460 
1461 	/* software abort outstanding ADISC */
1462 	lpfc_els_abort(phba, ndlp);
1463 
1464 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1465 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1466 	spin_lock_irq(shost->host_lock);
1467 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1468 	spin_unlock_irq(shost->host_lock);
1469 	lpfc_disc_set_adisc(vport, ndlp);
1470 	return ndlp->nlp_state;
1471 }
1472 
1473 static uint32_t
1474 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1475 			      struct lpfc_nodelist *ndlp,
1476 			      void *arg,
1477 			      uint32_t evt)
1478 {
1479 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1480 
1481 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1482 	return ndlp->nlp_state;
1483 }
1484 
1485 static uint32_t
1486 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1487 			     struct lpfc_nodelist *ndlp,
1488 			     void *arg,
1489 			     uint32_t evt)
1490 {
1491 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1492 
1493 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1494 	return ndlp->nlp_state;
1495 }
1496 
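/* A LOGO arrived while our REG_LOGIN mailbox is still outstanding.  Any
 * active or queued REG_LOGIN64 command that references this node is
 * detached or freed (dropping its node reference) before the LOGO is
 * processed normally.
 */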
1497 static uint32_t
1498 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1499 			     struct lpfc_nodelist *ndlp,
1500 			     void *arg,
1501 			     uint32_t evt)
1502 {
1503 	struct lpfc_hba   *phba = vport->phba;
1504 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1505 	LPFC_MBOXQ_t	  *mb;
1506 	LPFC_MBOXQ_t	  *nextmb;
1507 	struct lpfc_dmabuf *mp;
1508 
1509 	cmdiocb = (struct lpfc_iocbq *) arg;
1510 
1511 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1512 	if ((mb = phba->sli.mbox_active)) {
1513 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1514 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1515 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1516 			lpfc_nlp_put(ndlp);
1517 			mb->context2 = NULL;
1518 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1519 		}
1520 	}
1521 
1522 	spin_lock_irq(&phba->hbalock);
1523 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1524 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1525 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1526 			mp = (struct lpfc_dmabuf *) (mb->context1);
1527 			if (mp) {
1528 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1529 				kfree(mp);
1530 			}
1531 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1532 			lpfc_nlp_put(ndlp);
1533 			list_del(&mb->list);
1534 			phba->sli.mboxq_cnt--;
1535 			mempool_free(mb, phba->mbox_mem_pool);
1536 		}
1537 	}
1538 	spin_unlock_irq(&phba->hbalock);
1539 
1540 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1541 	return ndlp->nlp_state;
1542 }
1543 
1544 static uint32_t
1545 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1546 			       struct lpfc_nodelist *ndlp,
1547 			       void *arg,
1548 			       uint32_t evt)
1549 {
1550 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1551 
1552 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1553 	return ndlp->nlp_state;
1554 }
1555 
1556 static uint32_t
1557 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1558 			     struct lpfc_nodelist *ndlp,
1559 			     void *arg,
1560 			     uint32_t evt)
1561 {
1562 	struct lpfc_iocbq *cmdiocb;
1563 
1564 	cmdiocb = (struct lpfc_iocbq *) arg;
1565 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1566 	return ndlp->nlp_state;
1567 }
1568 
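/* REG_LOGIN mailbox completion while in REG_LOGIN_ISSUE state.  A failure
 * either parks the node in NPR (no retry when the RPI pool is exhausted) or
 * sends a LOGO and schedules a delayed PLOGI retry.  On success the RPI is
 * recorded (SLI3 only; SLI4 RPIs are preallocated), NLP_RPI_REGISTERED is
 * set, and a PRLI is issued for non-fabric nodes.
 */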
1569 static uint32_t
1570 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1571 				  struct lpfc_nodelist *ndlp,
1572 				  void *arg,
1573 				  uint32_t evt)
1574 {
1575 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1576 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1577 	MAILBOX_t *mb = &pmb->u.mb;
1578 	uint32_t did  = mb->un.varWords[1];
1579 
1580 	if (mb->mbxStatus) {
1581 		/* RegLogin failed */
1582 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1583 				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1584 				 "x%x\n",
1585 				 did, mb->mbxStatus, vport->port_state,
1586 				 mb->un.varRegLogin.vpi,
1587 				 mb->un.varRegLogin.rpi);
1588 		/*
1589 		 * If RegLogin failed due to lack of HBA resources do not
1590 		 * retry discovery.
1591 		 */
1592 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1593 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1594 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1595 			return ndlp->nlp_state;
1596 		}
1597 
1598 		/* Put ndlp in NPR state; set plogi timer for 1 sec */
1599 		mod_timer(&ndlp->nlp_delayfunc,
1600 			  jiffies + msecs_to_jiffies(1000 * 1));
1601 		spin_lock_irq(shost->host_lock);
1602 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1603 		spin_unlock_irq(shost->host_lock);
1604 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1605 
1606 		lpfc_issue_els_logo(vport, ndlp, 0);
1607 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1608 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1609 		return ndlp->nlp_state;
1610 	}
1611 
1612 	/* SLI4 ports have preallocated logical rpis. */
1613 	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1614 		ndlp->nlp_rpi = mb->un.varWords[0];
1615 
1616 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1617 
1618 	/* Only if we are not a fabric nport do we issue PRLI */
1619 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1620 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1621 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1622 		lpfc_issue_els_prli(vport, ndlp, 0);
1623 	} else {
1624 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1625 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1626 	}
1627 	return ndlp->nlp_state;
1628 }
1629 
1630 static uint32_t
1631 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1632 			      struct lpfc_nodelist *ndlp,
1633 			      void *arg,
1634 			      uint32_t evt)
1635 {
1636 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1637 
1638 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1639 		spin_lock_irq(shost->host_lock);
1640 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1641 		spin_unlock_irq(shost->host_lock);
1642 		return ndlp->nlp_state;
1643 	} else {
1644 		lpfc_drop_node(vport, ndlp);
1645 		return NLP_STE_FREED_NODE;
1646 	}
1647 }
1648 
1649 static uint32_t
1650 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1651 				 struct lpfc_nodelist *ndlp,
1652 				 void *arg,
1653 				 uint32_t evt)
1654 {
1655 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1656 
1657 	/* Don't do anything that will mess up processing of the
1658 	 * previous RSCN.
1659 	 */
1660 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1661 		return ndlp->nlp_state;
1662 
1663 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1664 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1665 	spin_lock_irq(shost->host_lock);
1666 	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1667 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1668 	spin_unlock_irq(shost->host_lock);
1669 	lpfc_disc_set_adisc(vport, ndlp);
1670 	return ndlp->nlp_state;
1671 }
1672 
1673 static uint32_t
1674 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1675 			  void *arg, uint32_t evt)
1676 {
1677 	struct lpfc_iocbq *cmdiocb;
1678 
1679 	cmdiocb = (struct lpfc_iocbq *) arg;
1680 
1681 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1682 	return ndlp->nlp_state;
1683 }
1684 
1685 static uint32_t
1686 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1687 			 void *arg, uint32_t evt)
1688 {
1689 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1690 
1691 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1692 	return ndlp->nlp_state;
1693 }
1694 
1695 static uint32_t
1696 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1697 			 void *arg, uint32_t evt)
1698 {
1699 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1700 
1701 	/* Software abort outstanding PRLI before sending acc */
1702 	lpfc_els_abort(vport->phba, ndlp);
1703 
1704 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1705 	return ndlp->nlp_state;
1706 }
1707 
1708 static uint32_t
1709 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1710 			   void *arg, uint32_t evt)
1711 {
1712 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1713 
1714 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1715 	return ndlp->nlp_state;
1716 }
1717 
1718 /* This routine is invoked when we rcv a PRLO request from an nport
1719  * we are logged into.  We should send back a PRLO rsp setting the
1720  * appropriate bits.
1721  * NEXT STATE = PRLI_ISSUE
1722  */
1723 static uint32_t
1724 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1725 			 void *arg, uint32_t evt)
1726 {
1727 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1728 
1729 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1730 	return ndlp->nlp_state;
1731 }
1732 
1733 static uint32_t
1734 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1735 			  void *arg, uint32_t evt)
1736 {
1737 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1738 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1739 	struct lpfc_hba   *phba = vport->phba;
1740 	IOCB_t *irsp;
1741 	PRLI *npr;
1742 
1743 	cmdiocb = (struct lpfc_iocbq *) arg;
1744 	rspiocb = cmdiocb->context_un.rsp_iocb;
1745 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1746 
1747 	irsp = &rspiocb->iocb;
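	/* A failed PRLI on an NPIV port with restrict_login set is treated
	 * like a non-FCP-target node: jump to the shared cleanup below,
	 * which sends a LOGO and parks the node in NPR.
	 */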
1748 	if (irsp->ulpStatus) {
1749 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1750 		    vport->cfg_restrict_login) {
1751 			goto out;
1752 		}
1753 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1754 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1755 		return ndlp->nlp_state;
1756 	}
1757 
1758 	/* Check out PRLI rsp */
1759 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1760 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1761 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1762 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1763 	    (npr->prliType == PRLI_FCP_TYPE)) {
1764 		if (npr->initiatorFunc)
1765 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1766 		if (npr->targetFunc) {
1767 			ndlp->nlp_type |= NLP_FCP_TARGET;
1768 			if (npr->writeXferRdyDis)
1769 				ndlp->nlp_flag |= NLP_FIRSTBURST;
1770 		}
1771 		if (npr->Retry)
1772 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1773 	}
1774 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1775 	    (vport->port_type == LPFC_NPIV_PORT) &&
1776 	     vport->cfg_restrict_login) {
1777 out:
1778 		spin_lock_irq(shost->host_lock);
1779 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1780 		spin_unlock_irq(shost->host_lock);
1781 		lpfc_issue_els_logo(vport, ndlp, 0);
1782 
1783 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1784 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1785 		return ndlp->nlp_state;
1786 	}
1787 
1788 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1789 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1790 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1791 	else
1792 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1793 	return ndlp->nlp_state;
1794 }
1795 
1796 /*! lpfc_device_rm_prli_issue
1797  *
1798  * \pre
1799  * \post
1800  * \param   vport
1801  * \param   ndlp
1802  * \param   arg
1803  * \param   evt
1804  * \return  uint32_t
1805  *
1806  * \b Description:
1807  *    This routine is invoked when we receive a request to remove an nport
1808  *    for which a PRLI is outstanding. We software abort the outstanding
1809  *    PRLI and drop the node, unless it is still marked for discovery
1810  *    (NLP_NPR_2B_DISC), in which case removal is deferred via NLP_NODEV_REMOVE.
1811  *
1812  */
1813 
1814 static uint32_t
1815 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1816 			  void *arg, uint32_t evt)
1817 {
1818 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1819 
1820 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1821 		spin_lock_irq(shost->host_lock);
1822 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1823 		spin_unlock_irq(shost->host_lock);
1824 		return ndlp->nlp_state;
1825 	} else {
1826 		/* software abort outstanding PLOGI */
1827 		/* software abort outstanding PRLI */
1828 
1829 		lpfc_drop_node(vport, ndlp);
1830 		return NLP_STE_FREED_NODE;
1831 	}
1832 }
1833 
1834 
1835 /*! lpfc_device_recov_prli_issue
1836  *
1837  * \pre
1838  * \post
1839  * \param   vport
1840  * \param   ndlp
1841  * \param   arg
1842  * \param   evt
1843  * \return  uint32_t
1844  *
1845  * \b Description:
1846  *    This routine is invoked when the state of a device is unknown, such
1847  *    as during a link down. Unless an RSCN is being deferred, we software
1848  *    abort the outstanding PRLI command and move the node back to the NPR
1849  *    state so it can be rediscovered.
1850  */
1851 static uint32_t
1852 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1853 			     struct lpfc_nodelist *ndlp,
1854 			     void *arg,
1855 			     uint32_t evt)
1856 {
1857 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1858 	struct lpfc_hba  *phba = vport->phba;
1859 
1860 	/* Don't do anything that will mess up processing of the
1861 	 * previous RSCN.
1862 	 */
1863 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1864 		return ndlp->nlp_state;
1865 
1866 	/* software abort outstanding PRLI */
1867 	lpfc_els_abort(phba, ndlp);
1868 
1869 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1870 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1871 	spin_lock_irq(shost->host_lock);
1872 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1873 	spin_unlock_irq(shost->host_lock);
1874 	lpfc_disc_set_adisc(vport, ndlp);
1875 	return ndlp->nlp_state;
1876 }
1877 
1878 static uint32_t
1879 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1880 			  void *arg, uint32_t evt)
1881 {
1882 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1883 	struct ls_rjt     stat;
1884 
1885 	memset(&stat, 0, sizeof(struct ls_rjt));
1886 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1887 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1888 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1889 	return ndlp->nlp_state;
1890 }
1891 
1892 static uint32_t
1893 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1894 			 void *arg, uint32_t evt)
1895 {
1896 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1897 	struct ls_rjt     stat;
1898 
1899 	memset(&stat, 0, sizeof(struct ls_rjt));
1900 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1901 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1902 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1903 	return ndlp->nlp_state;
1904 }
1905 
1906 static uint32_t
1907 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1908 			 void *arg, uint32_t evt)
1909 {
1910 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1911 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1912 
1913 	spin_lock_irq(shost->host_lock);
1914 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1915 	spin_unlock_irq(shost->host_lock);
1916 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1917 	return ndlp->nlp_state;
1918 }
1919 
1920 static uint32_t
1921 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1922 			   void *arg, uint32_t evt)
1923 {
1924 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1925 	struct ls_rjt     stat;
1926 
1927 	memset(&stat, 0, sizeof(struct ls_rjt));
1928 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1929 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1930 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1931 	return ndlp->nlp_state;
1932 }
1933 
1934 static uint32_t
1935 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1936 			 void *arg, uint32_t evt)
1937 {
1938 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1939 	struct ls_rjt     stat;
1940 
1941 	memset(&stat, 0, sizeof(struct ls_rjt));
1942 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1943 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1944 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1945 	return ndlp->nlp_state;
1946 }
1947 
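/* Our LOGO has completed.  Move the node back to NPR and let
 * lpfc_disc_set_adisc() decide whether it should be recovered via ADISC.
 */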
1948 static uint32_t
1949 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1950 			  void *arg, uint32_t evt)
1951 {
1952 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1953 
1954 	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1955 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1956 	spin_lock_irq(shost->host_lock);
1957 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1958 	spin_unlock_irq(shost->host_lock);
1959 	lpfc_disc_set_adisc(vport, ndlp);
1960 	return ndlp->nlp_state;
1961 }
1962 
1963 static uint32_t
1964 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1965 			  void *arg, uint32_t evt)
1966 {
1967 	/*
1968 	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
1969 	 * timed out and is calling for Device Remove.  In this case, the LOGO
1970 	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1971 	 * and other NLP flags are correctly cleaned up.
1972 	 */
1973 	return ndlp->nlp_state;
1974 }
1975 
1976 static uint32_t
1977 lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1978 			     struct lpfc_nodelist *ndlp,
1979 			     void *arg, uint32_t evt)
1980 {
1981 	/*
1982 	 * Device Recovery events have no meaning for a node with a LOGO
1983 	 * outstanding.  The LOGO has to complete first and handle the
1984 	 * node from that point.
1985 	 */
1986 	return ndlp->nlp_state;
1987 }
1988 
1989 static uint32_t
1990 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1991 			  void *arg, uint32_t evt)
1992 {
1993 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1994 
1995 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1996 	return ndlp->nlp_state;
1997 }
1998 
1999 static uint32_t
2000 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2001 			 void *arg, uint32_t evt)
2002 {
2003 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2004 
2005 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
2006 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2007 	return ndlp->nlp_state;
2008 }
2009 
2010 static uint32_t
2011 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2012 			 void *arg, uint32_t evt)
2013 {
2014 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2015 
2016 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2017 	return ndlp->nlp_state;
2018 }
2019 
2020 static uint32_t
2021 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2022 			   void *arg, uint32_t evt)
2023 {
2024 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2025 
2026 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2027 	return ndlp->nlp_state;
2028 }
2029 
2030 static uint32_t
2031 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2032 			 void *arg, uint32_t evt)
2033 {
2034 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2035 
2036 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2037 	return ndlp->nlp_state;
2038 }
2039 
2040 static uint32_t
2041 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
2042 			     struct lpfc_nodelist *ndlp,
2043 			     void *arg,
2044 			     uint32_t evt)
2045 {
2046 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2047 
2048 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
2049 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2050 	spin_lock_irq(shost->host_lock);
2051 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2052 	spin_unlock_irq(shost->host_lock);
2053 	lpfc_disc_set_adisc(vport, ndlp);
2054 
2055 	return ndlp->nlp_state;
2056 }
2057 
2058 static uint32_t
2059 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2060 			   void *arg, uint32_t evt)
2061 {
2062 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2063 
2064 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2065 	return ndlp->nlp_state;
2066 }
2067 
2068 static uint32_t
2069 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2070 			  void *arg, uint32_t evt)
2071 {
2072 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2073 
2074 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2075 	return ndlp->nlp_state;
2076 }
2077 
2078 static uint32_t
2079 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2080 			  void *arg, uint32_t evt)
2081 {
2082 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2083 
2084 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2085 	return ndlp->nlp_state;
2086 }
2087 
2088 static uint32_t
2089 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2090 			    struct lpfc_nodelist *ndlp,
2091 			    void *arg, uint32_t evt)
2092 {
2093 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2094 
2095 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2096 	return ndlp->nlp_state;
2097 }
2098 
2099 static uint32_t
2100 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2101 			  void *arg, uint32_t evt)
2102 {
2103 	struct lpfc_hba  *phba = vport->phba;
2104 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2105 
2106 	/* flush the target */
2107 	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2108 			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2109 
2110 	/* Treat like rcv logo */
2111 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2112 	return ndlp->nlp_state;
2113 }
2114 
2115 static uint32_t
2116 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2117 			      struct lpfc_nodelist *ndlp,
2118 			      void *arg,
2119 			      uint32_t evt)
2120 {
2121 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2122 
2123 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2124 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2125 	spin_lock_irq(shost->host_lock);
2126 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2127 	spin_unlock_irq(shost->host_lock);
2128 	lpfc_disc_set_adisc(vport, ndlp);
2129 	return ndlp->nlp_state;
2130 }
2131 
2132 static uint32_t
2133 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2134 			void *arg, uint32_t evt)
2135 {
2136 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2137 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2138 
2139 	/* Ignore PLOGI if we have an outstanding LOGO */
2140 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2141 		return ndlp->nlp_state;
2142 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2143 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2144 		spin_lock_irq(shost->host_lock);
2145 		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2146 		spin_unlock_irq(shost->host_lock);
2147 	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2148 		/* send PLOGI immediately, move to PLOGI issue state */
2149 		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2150 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2151 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2152 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2153 		}
2154 	}
2155 	return ndlp->nlp_state;
2156 }
2157 
2158 static uint32_t
2159 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2160 		       void *arg, uint32_t evt)
2161 {
2162 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2163 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2164 	struct ls_rjt     stat;
2165 
2166 	memset(&stat, 0, sizeof (struct ls_rjt));
2167 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2168 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2169 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2170 
2171 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2172 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2173 			spin_lock_irq(shost->host_lock);
2174 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2175 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2176 			spin_unlock_irq(shost->host_lock);
2177 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2178 			lpfc_issue_els_adisc(vport, ndlp, 0);
2179 		} else {
2180 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2181 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2182 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2183 		}
2184 	}
2185 	return ndlp->nlp_state;
2186 }
2187 
2188 static uint32_t
2189 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2190 		       void *arg, uint32_t evt)
2191 {
2192 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2193 
2194 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2195 	return ndlp->nlp_state;
2196 }
2197 
2198 static uint32_t
2199 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2200 			 void *arg, uint32_t evt)
2201 {
2202 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2203 
2204 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2205 	/*
2206 	 * Do not start discovery if discovery is about to start
2207  * or is in progress for this node. Starting discovery
2208 	 * here will affect the counting of discovery threads.
2209 	 */
2210 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2211 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2212 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2213 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2214 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2215 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2216 			lpfc_issue_els_adisc(vport, ndlp, 0);
2217 		} else {
2218 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2219 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2220 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2221 		}
2222 	}
2223 	return ndlp->nlp_state;
2224 }
2225 
2226 static uint32_t
2227 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2228 		       void *arg, uint32_t evt)
2229 {
2230 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2231 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2232 
2233 	spin_lock_irq(shost->host_lock);
2234 	ndlp->nlp_flag |= NLP_LOGO_ACC;
2235 	spin_unlock_irq(shost->host_lock);
2236 
2237 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2238 
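	/* If the delay timer is not already running, arm it for one second
	 * so a PLOGI is retried once the PRLO has been acked.
	 */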
2239 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2240 		mod_timer(&ndlp->nlp_delayfunc,
2241 			  jiffies + msecs_to_jiffies(1000 * 1));
2242 		spin_lock_irq(shost->host_lock);
2243 		ndlp->nlp_flag |= NLP_DELAY_TMO;
2244 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2245 		spin_unlock_irq(shost->host_lock);
2246 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2247 	} else {
2248 		spin_lock_irq(shost->host_lock);
2249 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2250 		spin_unlock_irq(shost->host_lock);
2251 	}
2252 	return ndlp->nlp_state;
2253 }
2254 
2255 static uint32_t
2256 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2257 			 void *arg, uint32_t evt)
2258 {
2259 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2260 	IOCB_t *irsp;
2261 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2262 
2263 	cmdiocb = (struct lpfc_iocbq *) arg;
2264 	rspiocb = cmdiocb->context_un.rsp_iocb;
2265 
2266 	irsp = &rspiocb->iocb;
2267 	if (irsp->ulpStatus) {
2268 		spin_lock_irq(shost->host_lock);
2269 		ndlp->nlp_flag |= NLP_DEFER_RM;
2270 		spin_unlock_irq(shost->host_lock);
2271 		return NLP_STE_FREED_NODE;
2272 	}
2273 	return ndlp->nlp_state;
2274 }
2275 
2276 static uint32_t
2277 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2278 			void *arg, uint32_t evt)
2279 {
2280 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2281 	IOCB_t *irsp;
2282 
2283 	cmdiocb = (struct lpfc_iocbq *) arg;
2284 	rspiocb = cmdiocb->context_un.rsp_iocb;
2285 
2286 	irsp = &rspiocb->iocb;
2287 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2288 		lpfc_drop_node(vport, ndlp);
2289 		return NLP_STE_FREED_NODE;
2290 	}
2291 	return ndlp->nlp_state;
2292 }
2293 
2294 static uint32_t
2295 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2296 			void *arg, uint32_t evt)
2297 {
2298 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2299 
2300 	/* For the fabric port just clear the fc flags. */
2301 	if (ndlp->nlp_DID == Fabric_DID) {
2302 		spin_lock_irq(shost->host_lock);
2303 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2304 		spin_unlock_irq(shost->host_lock);
2305 	}
2306 	lpfc_unreg_rpi(vport, ndlp);
2307 	return ndlp->nlp_state;
2308 }
2309 
2310 static uint32_t
2311 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2312 			 void *arg, uint32_t evt)
2313 {
2314 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2315 	IOCB_t *irsp;
2316 
2317 	cmdiocb = (struct lpfc_iocbq *) arg;
2318 	rspiocb = cmdiocb->context_un.rsp_iocb;
2319 
2320 	irsp = &rspiocb->iocb;
2321 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2322 		lpfc_drop_node(vport, ndlp);
2323 		return NLP_STE_FREED_NODE;
2324 	}
2325 	return ndlp->nlp_state;
2326 }
2327 
2328 static uint32_t
2329 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2330 			    struct lpfc_nodelist *ndlp,
2331 			    void *arg, uint32_t evt)
2332 {
2333 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2334 	MAILBOX_t    *mb = &pmb->u.mb;
2335 
2336 	if (!mb->mbxStatus) {
2337 		/* SLI4 ports have preallocated logical rpis. */
2338 		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2339 			ndlp->nlp_rpi = mb->un.varWords[0];
2340 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2341 		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2342 			lpfc_unreg_rpi(vport, ndlp);
2343 		}
2344 	} else {
2345 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2346 			lpfc_drop_node(vport, ndlp);
2347 			return NLP_STE_FREED_NODE;
2348 		}
2349 	}
2350 	return ndlp->nlp_state;
2351 }
2352 
2353 static uint32_t
2354 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2355 			void *arg, uint32_t evt)
2356 {
2357 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2358 
2359 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2360 		spin_lock_irq(shost->host_lock);
2361 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2362 		spin_unlock_irq(shost->host_lock);
2363 		return ndlp->nlp_state;
2364 	}
2365 	lpfc_drop_node(vport, ndlp);
2366 	return NLP_STE_FREED_NODE;
2367 }
2368 
2369 static uint32_t
2370 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2371 			   void *arg, uint32_t evt)
2372 {
2373 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2374 
2375 	/* Don't do anything that will mess up processing of the
2376 	 * previous RSCN.
2377 	 */
2378 	if (vport->fc_flag & FC_RSCN_DEFERRED)
2379 		return ndlp->nlp_state;
2380 
2381 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2382 	spin_lock_irq(shost->host_lock);
2383 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2384 	spin_unlock_irq(shost->host_lock);
2385 	return ndlp->nlp_state;
2386 }
2387 
2388 
2389 /* This next section defines the NPort Discovery State Machine */
2390 
2391 /* There are four different doubly linked lists that nodelist entries can
2392  * reside on. The plogi list and adisc list are used when Link Up discovery
2393  * or RSCN processing is needed. Each list holds the nodes that we will send
2394  * PLOGI or ADISC on. These lists keep track of which nodes will be affected
2395  * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
2396  * The unmapped_list will contain all nodes that we have successfully logged
2397  * into at the Fibre Channel level. The mapped_list will contain all nodes
2398  * that are mapped FCP targets.
2399  */
2400 /*
2401  * The bind list is a list of undiscovered (potentially non-existent) nodes
2402  * that we have saved binding information on. This information is used when
2403  * nodes transition from the unmapped to the mapped list.
2404  */
2405 /* For UNUSED_NODE state, the node has just been allocated.
2406  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2407  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2408  * and put on the unmapped list. For ADISC processing, the node is taken off
2409  * the ADISC list and placed on either the mapped or unmapped list (depending
2410  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2411  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2412  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2413  * node, the node is taken off the unmapped list. The binding list is checked
2414  * for a valid binding, or a binding is automatically assigned. If binding
2415  * assignment is unsuccessful, the node is left on the unmapped list. If
2416  * binding assignment is successful, the associated binding list entry (if
2417  * any) is removed, and the node is placed on the mapped list.
2418  */
2419 /*
2420  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2421  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2422  * expire, all affected nodes will receive a DEVICE_RM event.
2423  */
2424 /*
2425  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2426  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2427  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2428  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2429  * we will first process the ADISC list.  32 entries are processed initially and
2430  * ADISC is initiated for each one.  Completions / Events for each node are
2431  * funneled through the state machine.  As each node finishes ADISC processing, it
2432  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2433  * waiting, and the ADISC list count is identically 0, then we are done. For
2434  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2435  * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2436  * list.  32 entries are processed initially and PLOGI is initiated for each one.
2437  * Completions / Events for each node are funneled through the state machine.  As
2438  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2439  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2440  * identically 0, then we are done. We have now completed discovery / RSCN
2441  * handling. Upon completion, ALL nodes should be on either the mapped or
2442  * unmapped lists.
2443  */
2444 
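/* The table below is the flattened state/event dispatch matrix used by
 * lpfc_disc_state_machine().  Each state owns NLP_EVT_MAX_EVENT consecutive
 * entries, so the action routine is looked up as:
 *
 *	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
 *
 * For example, a CMPL_PRLI event arriving while a node is in the
 * PRLI_ISSUE state dispatches to lpfc_cmpl_prli_prli_issue().
 */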
2445 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2446      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2447 	/* Action routine                  Event       Current State  */
2448 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2449 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2450 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2451 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2452 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2453 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2454 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2455 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2456 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2457 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2458 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2459 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2460 	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2461 
2462 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2463 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2464 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2465 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2466 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2467 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2468 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2469 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2470 	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2471 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2472 	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2473 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2474 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2475 
2476 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2477 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2478 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2479 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2480 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2481 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2482 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2483 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2484 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2485 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2486 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2487 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2488 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2489 
2490 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2491 	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2492 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2493 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2494 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2495 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2496 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2497 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2498 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2499 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2500 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2501 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2502 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2503 
2504 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2505 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2506 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2507 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2508 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2509 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2510 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2511 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2512 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2513 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2514 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2515 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2516 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2517 
2518 	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
2519 	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
2520 	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
2521 	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
2522 	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
2523 	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
2524 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2525 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2526 	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
2527 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2528 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2529 	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
2530 	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
2531 
2532 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2533 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2534 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2535 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2536 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2537 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2538 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2539 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2540 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2541 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2542 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2543 	lpfc_disc_illegal,		/* DEVICE_RM       */
2544 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2545 
2546 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2547 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2548 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2549 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2550 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2551 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2552 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2553 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2554 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2555 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2556 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2557 	lpfc_disc_illegal,		/* DEVICE_RM       */
2558 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2559 
2560 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2561 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2562 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2563 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2564 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2565 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2566 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2567 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2568 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2569 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2570 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2571 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2572 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2573 };
2574 
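/* lpfc_disc_state_machine - main entry point for NPort discovery events.
 *
 * Takes a reference on the node, logs the incoming event, dispatches to the
 * action routine for the node's current state from lpfc_disc_action[], and
 * returns the resulting node state (NLP_STE_FREED_NODE if the action routine
 * freed the node).
 */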
2575 int
2576 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2577 			void *arg, uint32_t evt)
2578 {
2579 	uint32_t cur_state, rc;
2580 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2581 			 uint32_t);
2582 	uint32_t got_ndlp = 0;
2583 
2584 	if (lpfc_nlp_get(ndlp))
2585 		got_ndlp = 1;
2586 
2587 	cur_state = ndlp->nlp_state;
2588 
2589 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2590 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2591 			 "0211 DSM in event x%x on NPort x%x in "
2592 			 "state %d Data: x%x\n",
2593 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2594 
2595 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2596 		 "DSM in:          evt:%d ste:%d did:x%x",
2597 		evt, cur_state, ndlp->nlp_DID);
2598 
2599 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2600 	rc = (func) (vport, ndlp, arg, evt);
2601 
2602 	/* DSM out state <rc> on NPort <nlp_DID> */
2603 	if (got_ndlp) {
2604 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2605 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2606 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2607 
2608 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2609 			"DSM out:         ste:%d did:x%x flg:x%x",
2610 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2611 		/* Decrement the ndlp reference count held for this function */
2612 		lpfc_nlp_put(ndlp);
2613 	} else {
2614 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2615 			"0213 DSM out state %d on NPort free\n", rc);
2616 
2617 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2618 			"DSM out:         ste:%d did:x%x flg:x%x",
2619 			rc, 0, 0);
2620 	}
2621 
2622 	return rc;
2623 }
2624