xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_node.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #include <emlxs.h>
28 
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_NODE_C);
32 
/*
 * Close a node on one ring: stop servicing its transmit queues and
 * remove it from the ring's circular service queue.
 *
 * Timeout == -1 will enable the offline timer (when CFG_OFFLINE_TIMEOUT
 * is configured); a positive timeout simply arms a reopen timer that
 * emlxs_node_timeout() will act on later.
 */
extern void
emlxs_node_close(emlxs_port_t *port, NODELIST *ndlp, uint32_t ringno,
    int32_t timeout)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	RING *rp;
	NODELIST *prev;
	uint32_t offline = 0;

	/* If node is on a ring service queue, then remove it */
	mutex_enter(&EMLXS_RINGTX_LOCK);

	/* Return if node destroyed */
	if (!ndlp || !ndlp->nlp_active) {
		mutex_exit(&EMLXS_RINGTX_LOCK);

		return;
	}

	/* Check offline support */
	if (timeout == -1) {
		if (cfg[CFG_OFFLINE_TIMEOUT].current) {
			timeout = cfg[CFG_OFFLINE_TIMEOUT].current;
			offline = 1;
		} else {
			/* Offline timer not configured; close with no timer */
			timeout = 0;
		}
	}

	if (ringno == FC_IP_RING) {
		/* Clear IP XRI */
		ndlp->nlp_Xri = 0;
	}

	/* Check if node is already closed */
	if (ndlp->nlp_flag[ringno] & NLP_CLOSED) {
		/* Already closed and offline: nothing more to do */
		if (ndlp->nlp_flag[ringno] & NLP_OFFLINE) {
			mutex_exit(&EMLXS_RINGTX_LOCK);
			return;
		}

		/* Upgrade an already-closed node to offline and arm timer */
		if (offline) {
			ndlp->nlp_tics[ringno] = hba->timer_tics + timeout;
			ndlp->nlp_flag[ringno] |= NLP_OFFLINE;
			mutex_exit(&EMLXS_RINGTX_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_closed_msg,
			    "node=%p did=%06x %s. offline=%d set.", ndlp,
			    ndlp->nlp_DID, emlxs_ring_xlate(ringno), timeout);

		} else if (timeout) {
			/* (Re)arm the reopen timer only */
			ndlp->nlp_tics[ringno] = hba->timer_tics + timeout;
			mutex_exit(&EMLXS_RINGTX_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_closed_msg,
			    "node=%p did=%06x %s. timeout=%d set.", ndlp,
			    ndlp->nlp_DID, emlxs_ring_xlate(ringno), timeout);
		} else {
			mutex_exit(&EMLXS_RINGTX_LOCK);
		}

		return;
	}

	/* Set the node closed */
	ndlp->nlp_flag[ringno] |= NLP_CLOSED;

	if (offline) {
		/* Arm the offline timer; emlxs_node_timeout() will flush */
		ndlp->nlp_tics[ringno] = hba->timer_tics + timeout;
		ndlp->nlp_flag[ringno] |= NLP_OFFLINE;

	} else if (timeout) {
		/* Arm the reopen timer; emlxs_node_timeout() will reopen */
		ndlp->nlp_tics[ringno] = hba->timer_tics + timeout;

	}

	/*
	 * The ring service queue is a circular singly-linked list;
	 * a non-NULL nlp_next[ringno] means the node is on it.
	 */
	if (ndlp->nlp_next[ringno]) {
		/* Remove node from ring queue */
		rp = &hba->ring[ringno];

		/* If this is the only node on list */
		if (rp->nodeq.q_first == (void *)ndlp &&
		    rp->nodeq.q_last == (void *)ndlp) {
			rp->nodeq.q_last = NULL;
			rp->nodeq.q_first = NULL;
			rp->nodeq.q_cnt = 0;
		} else if (rp->nodeq.q_first == (void *)ndlp) {
			/* Node is at the head: advance q_first and re-close */
			/* the circle from the tail */
			rp->nodeq.q_first = ndlp->nlp_next[ringno];
			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
			    rp->nodeq.q_first;
			rp->nodeq.q_cnt--;
		} else {	/* This is a little more difficult */

			/* Find the previous node in the circular ring queue */
			prev = ndlp;
			while (prev->nlp_next[ringno] != ndlp) {
				prev = prev->nlp_next[ringno];
			}

			prev->nlp_next[ringno] = ndlp->nlp_next[ringno];

			if (rp->nodeq.q_last == (void *)ndlp) {
				rp->nodeq.q_last = (void *)prev;
			}
			rp->nodeq.q_cnt--;

		}

		/* Clear node */
		ndlp->nlp_next[ringno] = NULL;
	}

	mutex_exit(&EMLXS_RINGTX_LOCK);

	return;

}  /* emlxs_node_close() */
152 
153 
154 /* Called by emlxs_timer_check_nodes() */
155 extern void
156 emlxs_node_timeout(emlxs_port_t *port, NODELIST *ndlp, uint32_t ringno)
157 {
158 	emlxs_hba_t *hba = HBA;
159 
160 	/* If node needs servicing, then add it to the ring queues */
161 	mutex_enter(&EMLXS_RINGTX_LOCK);
162 
163 	/* Return if node destroyed */
164 	if (!ndlp || !ndlp->nlp_active) {
165 		mutex_exit(&EMLXS_RINGTX_LOCK);
166 		return;
167 	}
168 
169 	/* Open the node if not offline */
170 	if (!(ndlp->nlp_flag[ringno] & NLP_OFFLINE)) {
171 		mutex_exit(&EMLXS_RINGTX_LOCK);
172 
173 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_timeout_msg,
174 		    "node=%p did=%06x %s. Opening.", ndlp, ndlp->nlp_DID,
175 		    emlxs_ring_xlate(ringno));
176 
177 		emlxs_node_open(port, ndlp, ringno);
178 		return;
179 	}
180 
181 	/* OFFLINE TIMEOUT OCCURRED! */
182 
183 	/* Clear the timer */
184 	ndlp->nlp_tics[ringno] = 0;
185 
186 	mutex_exit(&EMLXS_RINGTX_LOCK);
187 
188 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_timeout_msg,
189 	    "node=%p did=%06x %s. Flushing.", ndlp, ndlp->nlp_DID,
190 	    emlxs_ring_xlate(ringno));
191 
192 	/* Flush tx queue for this ring */
193 	(void) emlxs_tx_node_flush(port, ndlp, &hba->ring[ringno], 0, 0);
194 
195 	/* Flush chip queue for this ring */
196 	(void) emlxs_chipq_node_flush(port, &hba->ring[ringno], ndlp, 0);
197 
198 	return;
199 
200 }  /* emlxs_node_timeout() */
201 
202 
/*
 * Reopen a node on a ring: clear its CLOSED/OFFLINE state, put it back
 * on the ring's circular service queue if it has pending traffic, and,
 * on the FCP ring with the link up, issue CLEAR_LA once no FCP2 devices
 * remain closed.
 */
extern void
emlxs_node_open(emlxs_port_t *port, NODELIST *ndlp, uint32_t ringno)
{
	emlxs_hba_t *hba = HBA;
	RING *rp;
	uint32_t found;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	uint32_t i;

	/* If node needs servicing, then add it to the ring queues */
	mutex_enter(&EMLXS_RINGTX_LOCK);

	/* Return if node destroyed */
	if (!ndlp || !ndlp->nlp_active) {
		mutex_exit(&EMLXS_RINGTX_LOCK);

		return;
	}

	/* Return if node already open */
	if (!(ndlp->nlp_flag[ringno] & NLP_CLOSED)) {
		mutex_exit(&EMLXS_RINGTX_LOCK);

		return;
	}

	/* Set the node open (not closed) */
	ndlp->nlp_flag[ringno] &= ~(NLP_CLOSED|NLP_OFFLINE);

	/* Clear the timer */
	ndlp->nlp_tics[ringno] = 0;

	/*
	 * If the ptx or the tx queue needs servicing and
	 * the node is not already on the ring queue
	 * (non-NULL nlp_next[ringno] means it is already queued;
	 * the service queue is a circular singly-linked list)
	 */
	if ((ndlp->nlp_ptx[ringno].q_first || ndlp->nlp_tx[ringno].q_first) &&
	    !ndlp->nlp_next[ringno]) {
		rp = &hba->ring[ringno];

		/* If so, then add it to the ring queue */
		if (rp->nodeq.q_first) {
			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
			    (uint8_t *)ndlp;
			ndlp->nlp_next[ringno] = rp->nodeq.q_first;

			/* If this is not the base node then */
			/* add it to the tail */
			if (!ndlp->nlp_base) {
				rp->nodeq.q_last = (uint8_t *)ndlp;
			} else {	/* Otherwise, add it to the head */

				/* The command node always gets priority */
				rp->nodeq.q_first = (uint8_t *)ndlp;
			}

			rp->nodeq.q_cnt++;
		} else {
			/* Queue was empty: node becomes the entire circle */
			rp->nodeq.q_first = (uint8_t *)ndlp;
			rp->nodeq.q_last = (uint8_t *)ndlp;
			ndlp->nlp_next[ringno] = ndlp;
			rp->nodeq.q_cnt = 1;
		}
	}

	mutex_exit(&EMLXS_RINGTX_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_opened_msg,
	    "node=%p did=%06x %s.", ndlp, ndlp->nlp_DID,
	    emlxs_ring_xlate(ringno));

	/* If link attention needs to be cleared */
	if ((hba->state == FC_LINK_UP) && (ringno == FC_FCP_RING)) {

		/* Scan to see if any FCP2 devices are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}

		rw_exit(&port->node_rwlock);

		if (!found) {
			/* Clear link attention */
			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
			    MEM_MBOX | MEM_PRI))) {
				mutex_enter(&EMLXS_PORT_LOCK);

				/*
				 * If state is not FC_LINK_UP, then either the
				 * link has gone down or a FC_CLEAR_LA has
				 * already been issued
				 */
				if (hba->state != FC_LINK_UP) {
					mutex_exit(&EMLXS_PORT_LOCK);
					(void) emlxs_mem_put(hba, MEM_MBOX,
					    (uint8_t *)mbox);
					goto done;
				}

				/* Claim the CLEAR_LA state under the lock */
				emlxs_ffstate_change_locked(hba, FC_CLEAR_LA);
				hba->discovery_timer = 0;
				mutex_exit(&EMLXS_PORT_LOCK);

				emlxs_mb_clear_la(hba, (MAILBOX *)mbox);

				/*
				 * MBX_BUSY means the mailbox was queued and
				 * still owns the buffer; any other result
				 * requires us to return it to the pool.
				 */
				if (emlxs_sli_issue_mbox_cmd(hba,
				    (MAILBOX *)mbox, MBX_NOWAIT, 0) !=
				    MBX_BUSY) {
					(void) emlxs_mem_put(hba, MEM_MBOX,
					    (uint8_t *)mbox);
				}
			} else {
				/* Close the node and try again */
				/* in a few seconds */
				/*
				 * NOTE(review): this path returns without the
				 * cv_broadcast below — presumably intentional
				 * since the node was just re-closed; confirm.
				 */
				emlxs_node_close(port, ndlp, ringno, 5);
				return;
			}
		}
	}

done:

	/* Wake any sleeping threads */
	mutex_enter(&EMLXS_PKT_LOCK);
	cv_broadcast(&EMLXS_PKT_CV);
	mutex_exit(&EMLXS_PKT_LOCK);

	return;

}  /* emlxs_node_open() */
349 
350 
351 static int
352 emlxs_node_match_did(emlxs_port_t *port, NODELIST *ndlp, uint32_t did)
353 {
354 	D_ID mydid;
355 	D_ID odid;
356 	D_ID ndid;
357 
358 	if (ndlp->nlp_DID == did)
359 		return (1);
360 
361 	/*
362 	 * Next check for area/domain == 0 match
363 	 */
364 	mydid.un.word = port->did;
365 	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
366 		goto out;
367 	}
368 
369 	ndid.un.word = did;
370 	odid.un.word = ndlp->nlp_DID;
371 	if (ndid.un.b.id == odid.un.b.id) {
372 		if ((mydid.un.b.domain == ndid.un.b.domain) &&
373 		    (mydid.un.b.area == ndid.un.b.area)) {
374 			ndid.un.word = ndlp->nlp_DID;
375 			odid.un.word = did;
376 			if ((ndid.un.b.domain == 0) && (ndid.un.b.area == 0)) {
377 				return (1);
378 			}
379 			goto out;
380 		}
381 
382 		ndid.un.word = ndlp->nlp_DID;
383 		if ((mydid.un.b.domain == ndid.un.b.domain) &&
384 		    (mydid.un.b.area == ndid.un.b.area)) {
385 			odid.un.word = ndlp->nlp_DID;
386 			ndid.un.word = did;
387 			if ((ndid.un.b.domain == 0) && (ndid.un.b.area == 0)) {
388 				return (1);
389 			}
390 		}
391 	}
392 
393 out:
394 
395 	return (0);
396 
397 }  /* End emlxs_node_match_did */
398 
399 
400 
401 extern NODELIST *
402 emlxs_node_find_mac(emlxs_port_t *port, uint8_t *mac)
403 {
404 	NODELIST *nlp;
405 	uint32_t i;
406 
407 	rw_enter(&port->node_rwlock, RW_READER);
408 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
409 		nlp = port->node_table[i];
410 		while (nlp != NULL) {
411 			/*
412 			 * If portname matches mac address,
413 			 * return NODELIST entry
414 			 */
415 			if ((nlp->nlp_portname.IEEE[0] == mac[0])) {
416 				if ((nlp->nlp_DID != Bcast_DID) &&
417 				    ((nlp->nlp_DID & Fabric_DID_MASK) ==
418 				    Fabric_DID_MASK)) {
419 					nlp = (NODELIST *)nlp->nlp_list_next;
420 					continue;
421 				}
422 
423 				if ((nlp->nlp_portname.IEEE[1] == mac[1]) &&
424 				    (nlp->nlp_portname.IEEE[2] == mac[2]) &&
425 				    (nlp->nlp_portname.IEEE[3] == mac[3]) &&
426 				    (nlp->nlp_portname.IEEE[4] == mac[4]) &&
427 				    (nlp->nlp_portname.IEEE[5] == mac[5])) {
428 					rw_exit(&port->node_rwlock);
429 					return (nlp);
430 				}
431 
432 			}
433 
434 			nlp = (NODELIST *)nlp->nlp_list_next;
435 		}
436 	}
437 	rw_exit(&port->node_rwlock);
438 
439 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg,
440 	    "find: MAC=%02x%02x%02x%02x%02x%02x", mac[0], mac[1], mac[2],
441 	    mac[3], mac[4], mac[5]);
442 
443 	return (NULL);
444 
445 }  /* emlxs_node_find_mac() */
446 
447 
448 extern NODELIST *
449 emlxs_node_find_did(emlxs_port_t *port, uint32_t did)
450 {
451 	emlxs_hba_t *hba = HBA;
452 	NODELIST *nlp;
453 	uint32_t hash;
454 
455 	/* Check for invalid node ids  */
456 	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
457 		return ((NODELIST *)0);
458 	}
459 
460 	if (did & 0xff000000) {
461 		return ((NODELIST *)0);
462 	}
463 
464 	/* Check for bcast node */
465 	if (did == Bcast_DID) {
466 		/* Use the base node here */
467 		return (&port->node_base);
468 	}
469 #ifdef MENLO_SUPPORT
470 	/* Check for menlo node */
471 	if (did == EMLXS_MENLO_DID) {
472 		/* Use the base node here */
473 		return (&port->node_base);
474 	}
475 #endif /* MENLO_SUPPORT */
476 
477 	/* Check for host node */
478 	if (did == port->did && !(hba->flag & FC_LOOPBACK_MODE)) {
479 		/* Use the base node here */
480 		return (&port->node_base);
481 	}
482 
483 	/*
484 	 * Convert well known fabric addresses to the Fabric_DID,
485 	 * since we don't login to some of them
486 	 */
487 	if ((did == SCR_DID)) {
488 		did = Fabric_DID;
489 	}
490 
491 	rw_enter(&port->node_rwlock, RW_READER);
492 	hash = EMLXS_DID_HASH(did);
493 	nlp = port->node_table[hash];
494 	while (nlp != NULL) {
495 		/* Check for obvious match */
496 		if (nlp->nlp_DID == did) {
497 			rw_exit(&port->node_rwlock);
498 			return (nlp);
499 		}
500 
501 		/* Check for detailed match */
502 		else if (emlxs_node_match_did(port, nlp, did)) {
503 			rw_exit(&port->node_rwlock);
504 			return (nlp);
505 		}
506 
507 		nlp = (NODELIST *)nlp->nlp_list_next;
508 	}
509 	rw_exit(&port->node_rwlock);
510 
511 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: did=%x",
512 	    did);
513 
514 	/* no match found */
515 	return ((NODELIST *)0);
516 
517 }  /* emlxs_node_find_did() */
518 
519 
520 extern NODELIST *
521 emlxs_node_find_rpi(emlxs_port_t *port, uint32_t rpi)
522 {
523 	NODELIST *nlp;
524 	uint32_t i;
525 
526 	rw_enter(&port->node_rwlock, RW_READER);
527 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
528 		nlp = port->node_table[i];
529 		while (nlp != NULL) {
530 			if (nlp->nlp_Rpi == rpi) {
531 				rw_exit(&port->node_rwlock);
532 				return (nlp);
533 			}
534 
535 			nlp = (NODELIST *)nlp->nlp_list_next;
536 		}
537 	}
538 	rw_exit(&port->node_rwlock);
539 
540 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: rpi=%x",
541 	    rpi);
542 
543 	/* no match found */
544 	return ((NODELIST *)0);
545 
546 }  /* emlxs_node_find_rpi() */
547 
548 
549 extern NODELIST *
550 emlxs_node_find_wwpn(emlxs_port_t *port, uint8_t *wwpn)
551 {
552 	NODELIST *nlp;
553 	uint32_t i;
554 	uint32_t j;
555 	uint8_t *bptr1;
556 	uint8_t *bptr2;
557 
558 	rw_enter(&port->node_rwlock, RW_READER);
559 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
560 		nlp = port->node_table[i];
561 		while (nlp != NULL) {
562 			bptr1 = (uint8_t *)&nlp->nlp_portname;
563 			bptr1 += 7;
564 			bptr2 = (uint8_t *)wwpn;
565 			bptr2 += 7;
566 
567 			for (j = 0; j < 8; j++) {
568 				if (*bptr1-- != *bptr2--) {
569 					break;
570 				}
571 			}
572 
573 			if (j == 8) {
574 				rw_exit(&port->node_rwlock);
575 				return (nlp);
576 			}
577 
578 			nlp = (NODELIST *)nlp->nlp_list_next;
579 		}
580 	}
581 	rw_exit(&port->node_rwlock);
582 
583 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg,
584 	    "find: wwpn=%02x%02x%02x%02x%02x%02x%02x%02x", wwpn[0], wwpn[1],
585 	    wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
586 
587 	/* no match found */
588 	return ((NODELIST *)0);
589 
590 }  /* emlxs_node_find_wwpn() */
591 
592 
593 extern NODELIST *
594 emlxs_node_find_index(emlxs_port_t *port, uint32_t index,
595     uint32_t nports_only)
596 {
597 	NODELIST *nlp;
598 	uint32_t i;
599 	uint32_t count;
600 
601 	rw_enter(&port->node_rwlock, RW_READER);
602 
603 	if (index > port->node_count - 1) {
604 		rw_exit(&port->node_rwlock);
605 		return (NULL);
606 	}
607 
608 	count = 0;
609 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
610 		nlp = port->node_table[i];
611 		while (nlp != NULL) {
612 			/* Skip fabric ports if requested */
613 			if (nports_only &&
614 			    (nlp->nlp_DID & 0xFFF000) == 0xFFF000) {
615 				nlp = (NODELIST *)nlp->nlp_list_next;
616 				continue;
617 			}
618 
619 			if (count == index) {
620 				rw_exit(&port->node_rwlock);
621 				return (nlp);
622 			}
623 
624 			nlp = (NODELIST *)nlp->nlp_list_next;
625 			count++;
626 		}
627 	}
628 	rw_exit(&port->node_rwlock);
629 
630 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: index=%d",
631 	    index);
632 
633 	/* no match found */
634 	return ((NODELIST *)0);
635 
636 }  /* emlxs_node_find_wwpn() */
637 
638 
639 extern uint32_t
640 emlxs_nport_count(emlxs_port_t *port)
641 {
642 	NODELIST *nlp;
643 	uint32_t i;
644 	uint32_t nport_count = 0;
645 
646 	rw_enter(&port->node_rwlock, RW_READER);
647 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
648 		nlp = port->node_table[i];
649 		while (nlp != NULL) {
650 			if ((nlp->nlp_DID & 0xFFF000) != 0xFFF000) {
651 				nport_count++;
652 			}
653 
654 			nlp = (NODELIST *)nlp->nlp_list_next;
655 		}
656 	}
657 	rw_exit(&port->node_rwlock);
658 
659 	return (nport_count);
660 
661 }  /* emlxs_nport_count() */
662 
663 
664 
/*
 * Tear down every node on this port: flush and free all nodes in the
 * hash table, then reset and flush the permanent base node.
 */
extern void
emlxs_node_destroy_all(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	NODELIST *next;
	NODELIST *ndlp;
	uint8_t *wwn;
	uint32_t i;

	/* Flush and free the nodes */
	rw_enter(&port->node_rwlock, RW_WRITER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];
		port->node_table[i] = 0;
		while (ndlp != NULL) {
			/* Save the chain link before tearing the node down */
			next = ndlp->nlp_list_next;
			ndlp->nlp_list_next = NULL;
			ndlp->nlp_list_prev = NULL;
			/* Mark inactive so concurrent users bail out */
			ndlp->nlp_active = 0;

			if (port->node_count) {
				port->node_count--;
			}

			wwn = (uint8_t *)&ndlp->nlp_portname;
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_node_destroy_msg, "did=%06x "
			    "rpi=%x wwpn=%02x%02x%02x%02x%02x%02x%02x%02x "
			    "count=%d", ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0],
			    wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6],
			    wwn[7], port->node_count);

			/* Abort anything still queued to this node */
			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

			/* Return the node to the driver's memory pool */
			(void) emlxs_mem_put(hba, MEM_NLP, (uint8_t *)ndlp);

			ndlp = next;
		}
	}
	port->node_count = 0;
	rw_exit(&port->node_rwlock);

	/* Clean the base node */
	mutex_enter(&EMLXS_PORT_LOCK);
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Flush the base node */
	(void) emlxs_tx_node_flush(port, &port->node_base, 0, 1, 0);
	(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

	return;

}  /* emlxs_node_destroy_all() */
721 
722 
723 extern void
724 emlxs_node_add(emlxs_port_t *port, NODELIST *ndlp)
725 {
726 	NODELIST *np;
727 	uint8_t *wwn;
728 	uint32_t hash;
729 
730 	rw_enter(&port->node_rwlock, RW_WRITER);
731 	hash = EMLXS_DID_HASH(ndlp->nlp_DID);
732 	np = port->node_table[hash];
733 
734 	/*
735 	 * Insert node pointer to the head
736 	 */
737 	port->node_table[hash] = ndlp;
738 	if (!np) {
739 		ndlp->nlp_list_next = NULL;
740 	} else {
741 		ndlp->nlp_list_next = np;
742 	}
743 	port->node_count++;
744 
745 	wwn = (uint8_t *)&ndlp->nlp_portname;
746 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_msg,
747 	    "node=%p did=%06x rpi=%x wwpn=%02x%02x%02x%02x%02x%02x%02x%02x "
748 	    "count=%d", ndlp, ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0], wwn[1],
749 	    wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7], port->node_count);
750 
751 	rw_exit(&port->node_rwlock);
752 
753 	return;
754 
755 }  /* emlxs_node_add() */
756 
757 
758 extern void
759 emlxs_node_rm(emlxs_port_t *port, NODELIST *ndlp)
760 {
761 	emlxs_hba_t *hba = HBA;
762 	NODELIST *np;
763 	NODELIST *prevp;
764 	uint8_t *wwn;
765 	uint32_t hash;
766 
767 	rw_enter(&port->node_rwlock, RW_WRITER);
768 	hash = EMLXS_DID_HASH(ndlp->nlp_DID);
769 	np = port->node_table[hash];
770 	prevp = NULL;
771 	while (np != NULL) {
772 		if (np->nlp_DID == ndlp->nlp_DID) {
773 			if (prevp == NULL) {
774 				port->node_table[hash] = np->nlp_list_next;
775 			} else {
776 				prevp->nlp_list_next = np->nlp_list_next;
777 			}
778 
779 			if (port->node_count) {
780 				port->node_count--;
781 			}
782 
783 			wwn = (uint8_t *)&ndlp->nlp_portname;
784 			EMLXS_MSGF(EMLXS_CONTEXT,
785 			    &emlxs_node_destroy_msg, "did=%06x "
786 			    "rpi=%x wwpn=%02x%02x%02x%02x%02x%02x%02x%02x "
787 			    "count=%d", ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0],
788 			    wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6],
789 			    wwn[7], port->node_count);
790 
791 			(void) emlxs_tx_node_flush(port, ndlp, 0, 1, 0);
792 
793 			ndlp->nlp_active = 0;
794 			(void) emlxs_mem_put(hba, MEM_NLP, (uint8_t *)ndlp);
795 
796 			break;
797 		}
798 		prevp = np;
799 		np = np->nlp_list_next;
800 	}
801 	rw_exit(&port->node_rwlock);
802 
803 	return;
804 
805 }  /* emlxs_node_rm() */
806