/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/door.h>
#include <sys/sdt.h>
#include <sys/thread.h>
#include <sys/avl.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/export.h>
#include <nfs/nfs_clnt.h>
#include <nfs/auth.h>

#define	EQADDR(a1, a2)  \
	(bcmp((char *)(a1)->buf, (char *)(a2)->buf, (a1)->len) == 0 && \
	(a1)->len == (a2)->len)
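
/*
 * Editorial note (a usage sketch, not part of the original interface):
 * because of the && ordering above, the bcmp() runs before the length
 * check and always uses a1->len, so EQADDR appears intended for
 * netbufs of equal length.  With addr1/addr2 as hypothetical 4-byte
 * buffers:
 *
 *	struct netbuf b1 = { .maxlen = 4, .len = 4, .buf = addr1 };
 *	struct netbuf b2 = { .maxlen = 4, .len = 4, .buf = addr2 };
 *
 *	if (EQADDR(&b1, &b2))
 *		... the two addresses match byte-for-byte ...
 */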

static struct knetconfig auth_knconf;
static servinfo_t svp;
static clinfo_t ci;

static struct kmem_cache *exi_cache_handle;
static void exi_cache_reclaim(void *);
static void exi_cache_trim(struct exportinfo *exi);

extern pri_t minclsyspri;

volatile uint_t nfsauth_cache_hit;
volatile uint_t nfsauth_cache_miss;
volatile uint_t nfsauth_cache_refresh;
volatile uint_t nfsauth_cache_reclaim;

/*
 * The lifetime of an auth cache entry:
 * ------------------------------------
 *
 * An auth cache entry is created with both the auth_time
 * and auth_freshness times set to the current time.
 *
 * Upon every client access which results in a hit, the
 * auth_time will be updated.
 *
 * If a client access determines that the auth_freshness
 * indicates that the entry is STALE, then it will be
 * refreshed. Note that this will explicitly reset
 * auth_time.
 *
 * When the REFRESH successfully occurs, then the
 * auth_freshness is updated.
 *
 * There are two ways for an entry to leave the cache:
 *
 * 1) Purged by an action on the export (removed or changed)
 * 2) Memory backpressure from the kernel (check against NFSAUTH_CACHE_TRIM)
 *
 * For 2) we check the timeout value against auth_time.
 */

/*
 * Number of seconds until we mark an auth cache entry for refresh.
 */
#define	NFSAUTH_CACHE_REFRESH 600

/*
 * Number of idle seconds until we yield to backpressure
 * to trim a cache entry.
 */
#define	NFSAUTH_CACHE_TRIM 3600
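
/*
 * Illustrative timeline (informational only): an entry created at time
 * T serves hits from the cache until T + NFSAUTH_CACHE_REFRESH (600s);
 * the first hit after that marks the entry STALE and queues it for the
 * refresh thread, which resets auth_freshness on success.  Separately,
 * an entry whose auth_time has not been updated for NFSAUTH_CACHE_TRIM
 * (3600s) may be freed when the kernel applies memory backpressure.
 */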

/*
 * While we could encapsulate the exi_list inside the
 * exi structure, we can't do that for the auth_list.
 * So, to keep things looking clean, we keep them both
 * in these external lists.
 */
typedef struct refreshq_exi_node {
	struct exportinfo	*ren_exi;
	list_t			ren_authlist;
	list_node_t		ren_node;
} refreshq_exi_node_t;

typedef struct refreshq_auth_node {
	struct auth_cache	*ran_auth;
	char			*ran_netid;
	list_node_t		ran_node;
} refreshq_auth_node_t;

/*
 * Used to manipulate things on the refreshq_queue.
 * Note that the refresh thread will effectively
 * pop a node off of the queue, at which point it
 * will no longer need to hold the mutex.
 */
static kmutex_t refreshq_lock;
static list_t refreshq_queue;
static kcondvar_t refreshq_cv;

/*
 * If there is ever a problem with loading the
 * module, then nfsauth_fini() needs to be called
 * to remove state.  Since the refreshq thread has
 * already been started by then, the two need to
 * work together to get rid of that state.
 */
typedef enum nfsauth_refreshq_thread_state {
	REFRESHQ_THREAD_RUNNING,
	REFRESHQ_THREAD_FINI_REQ,
	REFRESHQ_THREAD_HALTED
} nfsauth_refreshq_thread_state_t;
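
/*
 * The expected transitions are, roughly:
 *
 *	HALTED   -> RUNNING	nfsauth_init()
 *	RUNNING  -> FINI_REQ	nfsauth_fini()
 *	FINI_REQ -> HALTED	nfsauth_refresh_thread() exiting
 */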

nfsauth_refreshq_thread_state_t
refreshq_thread_state = REFRESHQ_THREAD_HALTED;

static void nfsauth_free_node(struct auth_cache *);
static void nfsauth_refresh_thread(void);

static int nfsauth_cache_compar(const void *, const void *);

/*
 * mountd is a server-side only daemon. This will need to be
 * revisited if the NFS server is ever made zones-aware.
 */
kmutex_t	mountd_lock;
door_handle_t	mountd_dh;

void
mountd_args(uint_t did)
{
	mutex_enter(&mountd_lock);
	if (mountd_dh != NULL)
		door_ki_rele(mountd_dh);
	mountd_dh = door_ki_lookup(did);
	mutex_exit(&mountd_lock);
}

void
nfsauth_init(void)
{
	/*
	 * mountd can be restarted by smf(5).  We need to make sure
	 * the updated door handle will safely make it to mountd_dh.
	 */
	mutex_init(&mountd_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&refreshq_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&refreshq_queue, sizeof (refreshq_exi_node_t),
	    offsetof(refreshq_exi_node_t, ren_node));

	cv_init(&refreshq_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Allocate nfsauth cache handle
	 */
	exi_cache_handle = kmem_cache_create("exi_cache_handle",
	    sizeof (struct auth_cache), 0, NULL, NULL,
	    exi_cache_reclaim, NULL, NULL, 0);

	refreshq_thread_state = REFRESHQ_THREAD_RUNNING;
	(void) zthread_create(NULL, 0, nfsauth_refresh_thread,
	    NULL, 0, minclsyspri);
}

/*
 * Finalization routine for nfsauth. It is important to call this routine
 * before destroying the exported_lock.
 */
void
nfsauth_fini(void)
{
	refreshq_exi_node_t	*ren;

	/*
	 * Prevent the nfsauth_refresh_thread from getting new
	 * work.
	 */
	mutex_enter(&refreshq_lock);
	if (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
		refreshq_thread_state = REFRESHQ_THREAD_FINI_REQ;
		cv_broadcast(&refreshq_cv);

		/*
		 * Also, wait for nfsauth_refresh_thread() to exit.
		 */
		while (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
			cv_wait(&refreshq_cv, &refreshq_lock);
		}
	}
	mutex_exit(&refreshq_lock);

	/*
	 * Walk the exi_list and in turn, walk the auth_lists and free all
	 * lists.  In addition, free INVALID auth_cache entries.
	 */
	while ((ren = list_remove_head(&refreshq_queue)) != NULL) {
		refreshq_auth_node_t *ran;

		while ((ran = list_remove_head(&ren->ren_authlist)) != NULL) {
			struct auth_cache *p = ran->ran_auth;
			if (p->auth_state == NFS_AUTH_INVALID)
				nfsauth_free_node(p);
			strfree(ran->ran_netid);
			kmem_free(ran, sizeof (refreshq_auth_node_t));
		}

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}
	list_destroy(&refreshq_queue);

	cv_destroy(&refreshq_cv);
	mutex_destroy(&refreshq_lock);

	mutex_destroy(&mountd_lock);

	/*
	 * Deallocate nfsauth cache handle
	 */
	kmem_cache_destroy(exi_cache_handle);
}

/*
 * Convert the address in a netbuf to
 * a hash index for the auth_cache table.
 */
static int
hash(struct netbuf *a)
{
	int i, h = 0;

	for (i = 0; i < a->len; i++)
		h ^= a->buf[i];

	return (h & (AUTH_TABLESIZE - 1));
}
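
/*
 * Worked example (assumes AUTH_TABLESIZE is a power of two, which the
 * mask above requires): for a masked netbuf whose only non-zero bytes
 * are 192, 168, 1 and 10, h = 0xc0 ^ 0xa8 ^ 0x01 ^ 0x0a = 0x63, and
 * the bucket index is 0x63 & (AUTH_TABLESIZE - 1).
 */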

/*
 * Mask out the components of an
 * address that do not identify
 * a host. For socket addresses the
 * masking gets rid of the port number.
 */
static void
addrmask(struct netbuf *addr, struct netbuf *mask)
{
	int i;

	for (i = 0; i < addr->len; i++)
		addr->buf[i] &= mask->buf[i];
}
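
/*
 * For example, for an AF_INET transport the mask obtained via
 * SVC_GETADDRMASK() typically preserves sin_family and sin_addr while
 * zeroing sin_port, so requests from the same host hash to the same
 * bucket regardless of the client's ephemeral source port.
 */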

/*
 * nfsauth4_access is used for NFS V4 auth checking. Besides doing
 * the common nfsauth_access(), it will check if the client can
 * have limited access to this vnode even if the security flavor
 * used does not meet the policy.
 */
int
nfsauth4_access(struct exportinfo *exi, vnode_t *vp, struct svc_req *req,
    cred_t *cr, uid_t *uid, gid_t *gid, uint_t *ngids, gid_t **gids)
{
	int access;

	access = nfsauth_access(exi, req, cr, uid, gid, ngids, gids);

	/*
	 * There are cases where the server needs to allow the client
	 * to have a limited view.
	 *
	 * e.g.
	 * /export is shared as "sec=sys,rw=dfs-test-4,sec=krb5,rw"
	 * /export/home is shared as "sec=sys,rw"
	 *
	 * When the client mounts /export with sec=sys, the client
	 * would get a limited view with RO access on /export to see
	 * "home" only because the client is allowed to access
	 * /export/home with auth_sys.
	 */
	if (access & NFSAUTH_DENIED || access & NFSAUTH_WRONGSEC) {
		/*
		 * Allow ro permission with LIMITED view if there is a
		 * sub-dir exported under vp.
		 */
		if (has_visible(exi, vp))
			return (NFSAUTH_LIMITED);
	}

	return (access);
}

static void
sys_log(const char *msg)
{
	static time_t	tstamp = 0;
	time_t		now;

	/*
	 * msg is shown (at most) once per minute
	 */
	now = gethrestime_sec();
	if ((tstamp + 60) < now) {
		tstamp = now;
		cmn_err(CE_WARN, msg);
	}
}

/*
 * Call up to mountd to get access information in the kernel.
 */
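/*
 * In outline, nfsauth_retrieve() performs the following round trip:
 *
 *	1. XDR-encode a varg_t carrying an NFSAUTH_ACCESS request;
 *	2. issue door_ki_upcall_limited() on mountd's door handle,
 *	   retrying on EAGAIN, EINTR, and a missing or revoked door;
 *	3. XDR-decode the nfsauth_res_t reply and copy out the access
 *	   bits along with the server-side credential mapping.
 */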
static bool_t
nfsauth_retrieve(struct exportinfo *exi, char *req_netid, int flavor,
    struct netbuf *addr, int *access, uid_t clnt_uid, gid_t clnt_gid,
    uint_t clnt_gids_cnt, const gid_t *clnt_gids, uid_t *srv_uid,
    gid_t *srv_gid, uint_t *srv_gids_cnt, gid_t **srv_gids)
{
	varg_t			  varg = {0};
	nfsauth_res_t		  res = {0};
	XDR			  xdrs;
	size_t			  absz;
	caddr_t			  abuf;
	int			  last = 0;
	door_arg_t		  da;
	door_info_t		  di;
	door_handle_t		  dh;
	uint_t			  ntries = 0;

	/*
	 * No entry in the cache for this client/flavor
	 * so we need to call the nfsauth service in the
	 * mount daemon.
	 */

	varg.vers = V_PROTO;
	varg.arg_u.arg.cmd = NFSAUTH_ACCESS;
	varg.arg_u.arg.areq.req_client.n_len = addr->len;
	varg.arg_u.arg.areq.req_client.n_bytes = addr->buf;
	varg.arg_u.arg.areq.req_netid = req_netid;
	varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path;
	varg.arg_u.arg.areq.req_flavor = flavor;
	varg.arg_u.arg.areq.req_clnt_uid = clnt_uid;
	varg.arg_u.arg.areq.req_clnt_gid = clnt_gid;
	varg.arg_u.arg.areq.req_clnt_gids.len = clnt_gids_cnt;
	varg.arg_u.arg.areq.req_clnt_gids.val = (gid_t *)clnt_gids;

	DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg);

	/*
	 * Setup the XDR stream for encoding the arguments. Notice that
	 * in addition to the args having variable fields (req_netid and
	 * req_path), the argument data structure is itself versioned,
	 * so we need to make sure we can size the arguments buffer
	 * appropriately to encode all the args. If we can't get sizing
	 * info _or_ properly encode the arguments, there's really no
	 * point in continuing, so we fail the request.
	 */
	if ((absz = xdr_sizeof(xdr_varg, &varg)) == 0) {
		*access = NFSAUTH_DENIED;
		return (FALSE);
	}

	abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP);
	xdrmem_create(&xdrs, abuf, absz, XDR_ENCODE);
	if (!xdr_varg(&xdrs, &varg)) {
		XDR_DESTROY(&xdrs);
		goto fail;
	}
	XDR_DESTROY(&xdrs);

	/*
	 * Prepare the door arguments
	 *
	 * We don't know the size of the message the daemon
	 * will pass back to us.  By setting rbuf to NULL,
	 * we force the door code to allocate a buf of the
	 * appropriate size.  We must set rsize > 0, however,
	 * else the door code acts as if no response was
	 * expected and doesn't pass the data to us.
	 */
	da.data_ptr = (char *)abuf;
	da.data_size = absz;
	da.desc_ptr = NULL;
	da.desc_num = 0;
	da.rbuf = NULL;
	da.rsize = 1;

retry:
	mutex_enter(&mountd_lock);
	dh = mountd_dh;
	if (dh != NULL)
		door_ki_hold(dh);
	mutex_exit(&mountd_lock);

	if (dh == NULL) {
		/*
		 * The rendezvous point has not been established yet!
		 * This could mean that either mountd(1m) has not yet
		 * been started or that _this_ routine nuked the door
		 * handle after receiving an EINTR for a REVOKED door.
		 *
		 * Returning NFSAUTH_DROP will cause the NFS client
		 * to retransmit the request, so let's try to be more
		 * resilient and retry up to NFSAUTH_DR_TRYCNT times
		 * before we bail.
		 */
		if (++ntries % NFSAUTH_DR_TRYCNT) {
			delay(hz);
			goto retry;
		}

		kmem_free(abuf, absz);

		sys_log("nfsauth: mountd has not established door");
		*access = NFSAUTH_DROP;
		return (FALSE);
	}

	ntries = 0;

	/*
	 * Now that we've got what we need, place the call.
	 */
	switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) {
	case 0:				/* Success */
		door_ki_rele(dh);

		if (da.data_ptr == NULL && da.data_size == 0) {
			/*
			 * The door_return that contained the data
			 * failed! We're here because of the 2nd
			 * door_return (w/o data) such that we can
			 * get control of the thread (and exit
			 * gracefully).
			 */
			DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil,
			    door_arg_t *, &da);
			goto fail;
		}

		break;

	case EAGAIN:
		/*
		 * Server out of resources; back off for a bit
		 */
		door_ki_rele(dh);
		delay(hz);
		goto retry;
		/* NOTREACHED */

	case EINTR:
		if (!door_ki_info(dh, &di)) {
			door_ki_rele(dh);

			if (di.di_attributes & DOOR_REVOKED) {
				/*
				 * The server barfed and revoked
				 * the (existing) door on us; we
				 * want to wait to give smf(5) a
				 * chance to restart mountd(1m)
				 * and establish a new door handle.
				 */
				mutex_enter(&mountd_lock);
				if (dh == mountd_dh) {
					door_ki_rele(mountd_dh);
					mountd_dh = NULL;
				}
				mutex_exit(&mountd_lock);
				delay(hz);
				goto retry;
			}
			/*
			 * If the door was _not_ revoked on us,
			 * then more than likely we took an INTR,
			 * so we need to fail the operation.
			 */
			goto fail;
		}
		/*
		 * The only failure that can occur from getting
		 * the door info is EINVAL, so we let the code
		 * below handle it.
		 */
		/* FALLTHROUGH */

	case EBADF:
	case EINVAL:
	default:
		/*
		 * If we have a stale door handle, give smf a last
		 * chance to start it by sleeping for a little bit.
		 * If we're still hosed, we'll fail the call.
		 *
		 * Since we're going to reacquire the door handle
		 * upon the retry, we opt to sleep for a bit and
		 * _not_ to clear mountd_dh. If mountd restarted
		 * and was able to set mountd_dh, we should see
		 * the new instance; if not, we won't get caught
		 * up in the retry/DELAY loop.
		 */
		door_ki_rele(dh);
		if (!last) {
			delay(hz);
			last++;
			goto retry;
		}
		sys_log("nfsauth: stale mountd door handle");
		goto fail;
	}

	ASSERT(da.rbuf != NULL);

	/*
	 * No door errors encountered; setup the XDR stream for decoding
	 * the results. If we fail to decode the results, we've got no
	 * other recourse than to fail the request.
	 */
	xdrmem_create(&xdrs, da.rbuf, da.rsize, XDR_DECODE);
	if (!xdr_nfsauth_res(&xdrs, &res)) {
		xdr_free(xdr_nfsauth_res, (char *)&res);
		XDR_DESTROY(&xdrs);
		kmem_free(da.rbuf, da.rsize);
		goto fail;
	}
	XDR_DESTROY(&xdrs);
	kmem_free(da.rbuf, da.rsize);

	DTRACE_PROBE1(nfsserv__func__nfsauth__results, nfsauth_res_t *, &res);
	switch (res.stat) {
		case NFSAUTH_DR_OKAY:
			*access = res.ares.auth_perm;
			*srv_uid = res.ares.auth_srv_uid;
			*srv_gid = res.ares.auth_srv_gid;
			*srv_gids_cnt = res.ares.auth_srv_gids.len;
			*srv_gids = kmem_alloc(*srv_gids_cnt * sizeof (gid_t),
			    KM_SLEEP);
			bcopy(res.ares.auth_srv_gids.val, *srv_gids,
			    *srv_gids_cnt * sizeof (gid_t));
			break;

		case NFSAUTH_DR_EFAIL:
		case NFSAUTH_DR_DECERR:
		case NFSAUTH_DR_BADCMD:
		default:
			xdr_free(xdr_nfsauth_res, (char *)&res);
fail:
			*access = NFSAUTH_DENIED;
			kmem_free(abuf, absz);
			return (FALSE);
			/* NOTREACHED */
	}

	xdr_free(xdr_nfsauth_res, (char *)&res);
	kmem_free(abuf, absz);

	return (TRUE);
}

static void
nfsauth_refresh_thread(void)
{
	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;

	struct exportinfo	*exi;

	int			access;
	bool_t			retrieval;

	callb_cpr_t		cprinfo;

	CALLB_CPR_INIT(&cprinfo, &refreshq_lock, callb_generic_cpr,
	    "nfsauth_refresh");

	for (;;) {
		mutex_enter(&refreshq_lock);
		if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
			/* Keep the hold on the lock! */
			break;
		}

		ren = list_remove_head(&refreshq_queue);
		if (ren == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&refreshq_cv, &refreshq_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &refreshq_lock);
			mutex_exit(&refreshq_lock);
			continue;
		}
		mutex_exit(&refreshq_lock);

		exi = ren->ren_exi;
		ASSERT(exi != NULL);

		/*
		 * Since the ren was removed from the refreshq_queue above,
		 * this is the only thread aware of the ren's existence, so
		 * we have exclusive ownership of it and do not need to
		 * protect it with any lock.
		 */
		while ((ran = list_remove_head(&ren->ren_authlist)) != NULL) {
			uid_t uid;
			gid_t gid;
			uint_t ngids;
			gid_t *gids;
			struct auth_cache *p = ran->ran_auth;
			char *netid = ran->ran_netid;

			ASSERT(p != NULL);
			ASSERT(netid != NULL);

			kmem_free(ran, sizeof (refreshq_auth_node_t));

			mutex_enter(&p->auth_lock);

			/*
			 * Once the entry goes INVALID, it cannot change
			 * state.
			 *
			 * There is also no need to refresh entries when we
			 * are just shutting down.
			 *
			 * In general, there is no need to hold the
			 * refreshq_lock to test the refreshq_thread_state.  We
			 * do hold it at other places because there is some
			 * related thread synchronization (or some other tasks)
			 * close to the refreshq_thread_state check.
			 *
			 * The check of the refreshq_thread_state value here
			 * is purely advisory; it merely allows
			 * nfsauth_refresh_thread() to shut down faster.  If
			 * we miss such an advisory, nothing catastrophic
			 * happens: we will just spin longer here before the
			 * shutdown.
			 */
			if (p->auth_state == NFS_AUTH_INVALID ||
			    refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
				mutex_exit(&p->auth_lock);

				if (p->auth_state == NFS_AUTH_INVALID)
					nfsauth_free_node(p);

				strfree(netid);

				continue;
			}

			/*
			 * Make sure the state is valid.  Note that once we
			 * change the state to NFS_AUTH_REFRESHING, no other
			 * thread will be able to work on this entry.
			 */
			ASSERT(p->auth_state == NFS_AUTH_STALE);

			p->auth_state = NFS_AUTH_REFRESHING;
			mutex_exit(&p->auth_lock);

			DTRACE_PROBE2(nfsauth__debug__cache__refresh,
			    struct exportinfo *, exi,
			    struct auth_cache *, p);

			/*
			 * The first caching of the access rights
			 * is done with the netid pulled out of the
			 * request from the client. All subsequent
			 * users of the cache may or may not have
			 * the same netid. It doesn't matter. So
			 * when we refresh, we simply use the netid
			 * of the request which triggered the
			 * refresh attempt.
			 */
			retrieval = nfsauth_retrieve(exi, netid,
			    p->auth_flavor, &p->auth_clnt->authc_addr, &access,
			    p->auth_clnt_uid, p->auth_clnt_gid,
			    p->auth_clnt_ngids, p->auth_clnt_gids, &uid, &gid,
			    &ngids, &gids);

			/*
			 * This can only be set in one other place
			 * and the state has to be NFS_AUTH_FRESH.
			 */
			strfree(netid);

			mutex_enter(&p->auth_lock);
			if (p->auth_state == NFS_AUTH_INVALID) {
				mutex_exit(&p->auth_lock);
				nfsauth_free_node(p);
				if (retrieval == TRUE)
					kmem_free(gids, ngids * sizeof (gid_t));
			} else {
				/*
				 * If we got an error, do not reset the
				 * time. This will cause the next access
				 * check for the client to reschedule this
				 * node.
				 */
				if (retrieval == TRUE) {
					p->auth_access = access;

					p->auth_srv_uid = uid;
					p->auth_srv_gid = gid;
					kmem_free(p->auth_srv_gids,
					    p->auth_srv_ngids * sizeof (gid_t));
					p->auth_srv_ngids = ngids;
					p->auth_srv_gids = gids;

					p->auth_freshness = gethrestime_sec();
				}
				p->auth_state = NFS_AUTH_FRESH;

				cv_broadcast(&p->auth_cv);
				mutex_exit(&p->auth_lock);
			}
		}

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}

	refreshq_thread_state = REFRESHQ_THREAD_HALTED;
	cv_broadcast(&refreshq_cv);
	CALLB_CPR_EXIT(&cprinfo);
	zthread_exit();
}

int
nfsauth_cache_clnt_compar(const void *v1, const void *v2)
{
	int c;

	const struct auth_cache_clnt *a1 = (const struct auth_cache_clnt *)v1;
	const struct auth_cache_clnt *a2 = (const struct auth_cache_clnt *)v2;

	if (a1->authc_addr.len < a2->authc_addr.len)
		return (-1);
	if (a1->authc_addr.len > a2->authc_addr.len)
		return (1);

	c = memcmp(a1->authc_addr.buf, a2->authc_addr.buf, a1->authc_addr.len);
	if (c < 0)
		return (-1);
	if (c > 0)
		return (1);

	return (0);
}

static int
nfsauth_cache_compar(const void *v1, const void *v2)
{
	const struct auth_cache *a1 = (const struct auth_cache *)v1;
	const struct auth_cache *a2 = (const struct auth_cache *)v2;

	if (a1->auth_flavor < a2->auth_flavor)
		return (-1);
	if (a1->auth_flavor > a2->auth_flavor)
		return (1);

	if (a1->auth_clnt_uid < a2->auth_clnt_uid)
		return (-1);
	if (a1->auth_clnt_uid > a2->auth_clnt_uid)
		return (1);

	if (a1->auth_clnt_gid < a2->auth_clnt_gid)
		return (-1);
	if (a1->auth_clnt_gid > a2->auth_clnt_gid)
		return (1);

	return (0);
}
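
/*
 * Both comparators honor the avl_create() contract of returning -1, 0,
 * or 1.  For instance, two auth_cache entries for the same client and
 * credentials that differ only in flavor (say AUTH_SYS vs. AUTH_NONE)
 * compare unequal on auth_flavor and therefore occupy distinct nodes
 * in the per-client authc_tree.
 */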

/*
 * Get the access information from the cache or call up to mountd
 * to get and cache the access information in the kernel.
 */
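/*
 * Structurally (a reader's sketch): each export keeps AUTH_TABLESIZE
 * buckets in exi_cache[]; each bucket is an AVL tree of
 * auth_cache_clnt nodes keyed by the masked client address, and each
 * client node holds an AVL tree of auth_cache entries keyed by
 * (flavor, uid, gid).  exi_cache_lock guards the outer trees, each
 * authc_lock guards one inner tree, and each auth_lock guards a
 * single entry's state.
 */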
static int
nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor,
    cred_t *cr, uid_t *uid, gid_t *gid, uint_t *ngids, gid_t **gids)
{
	struct netbuf		*taddrmask;
	struct netbuf		addr;	/* temporary copy of client's address */
	const struct netbuf	*claddr;
	avl_tree_t		*tree;
	struct auth_cache	ac;	/* used as a template for avl_find() */
	struct auth_cache_clnt	*c;
	struct auth_cache_clnt	acc;	/* used as a template for avl_find() */
	struct auth_cache	*p = NULL;
	int			access;

	uid_t			tmpuid;
	gid_t			tmpgid;
	uint_t			tmpngids;
	gid_t			*tmpgids;

	avl_index_t		where;	/* used for avl_find()/avl_insert() */

	ASSERT(cr != NULL);

	/*
	 * Now check whether this client already
	 * has an entry for this flavor in the cache
	 * for this export.
	 * Get the caller's address, mask off the
	 * parts of the address that do not identify
	 * the host (port number, etc), and then hash
	 * it to find the chain of cache entries.
	 */

	claddr = svc_getrpccaller(req->rq_xprt);
	addr = *claddr;
	addr.buf = kmem_alloc(addr.len, KM_SLEEP);
	bcopy(claddr->buf, addr.buf, claddr->len);

	SVC_GETADDRMASK(req->rq_xprt, SVC_TATTR_ADDRMASK, (void **)&taddrmask);
	ASSERT(taddrmask != NULL);
	addrmask(&addr, taddrmask);

	ac.auth_flavor = flavor;
	ac.auth_clnt_uid = crgetuid(cr);
	ac.auth_clnt_gid = crgetgid(cr);

	acc.authc_addr = addr;

	tree = exi->exi_cache[hash(&addr)];

	rw_enter(&exi->exi_cache_lock, RW_READER);
	c = (struct auth_cache_clnt *)avl_find(tree, &acc, NULL);

	if (c == NULL) {
		struct auth_cache_clnt *nc;

		rw_exit(&exi->exi_cache_lock);

		nc = kmem_alloc(sizeof (*nc), KM_NOSLEEP | KM_NORMALPRI);
		if (nc == NULL)
			goto retrieve;

		/*
		 * Initialize the new auth_cache_clnt
		 */
		nc->authc_addr = addr;
		nc->authc_addr.buf = kmem_alloc(addr.len,
		    KM_NOSLEEP | KM_NORMALPRI);
		if (addr.len != 0 && nc->authc_addr.buf == NULL) {
			kmem_free(nc, sizeof (*nc));
			goto retrieve;
		}
		bcopy(addr.buf, nc->authc_addr.buf, addr.len);
		rw_init(&nc->authc_lock, NULL, RW_DEFAULT, NULL);
		avl_create(&nc->authc_tree, nfsauth_cache_compar,
		    sizeof (struct auth_cache),
		    offsetof(struct auth_cache, auth_link));

		rw_enter(&exi->exi_cache_lock, RW_WRITER);
		c = (struct auth_cache_clnt *)avl_find(tree, &acc, &where);
		if (c == NULL) {
			avl_insert(tree, nc, where);
			rw_downgrade(&exi->exi_cache_lock);
			c = nc;
		} else {
			rw_downgrade(&exi->exi_cache_lock);

			avl_destroy(&nc->authc_tree);
			rw_destroy(&nc->authc_lock);
			kmem_free(nc->authc_addr.buf, nc->authc_addr.len);
			kmem_free(nc, sizeof (*nc));
		}
	}

	ASSERT(c != NULL);

	rw_enter(&c->authc_lock, RW_READER);
	p = (struct auth_cache *)avl_find(&c->authc_tree, &ac, NULL);

	if (p == NULL) {
		struct auth_cache *np;

		rw_exit(&c->authc_lock);

		np = kmem_cache_alloc(exi_cache_handle,
		    KM_NOSLEEP | KM_NORMALPRI);
		if (np == NULL) {
			rw_exit(&exi->exi_cache_lock);
			goto retrieve;
		}

		/*
		 * Initialize the new auth_cache
		 */
		np->auth_clnt = c;
		np->auth_flavor = flavor;
		np->auth_clnt_uid = crgetuid(cr);
		np->auth_clnt_gid = crgetgid(cr);
		np->auth_clnt_ngids = 0;
		np->auth_clnt_gids = NULL;
		np->auth_srv_ngids = 0;
		np->auth_srv_gids = NULL;
		np->auth_time = np->auth_freshness = gethrestime_sec();
		np->auth_state = NFS_AUTH_NEW;
		mutex_init(&np->auth_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&np->auth_cv, NULL, CV_DEFAULT, NULL);

		rw_enter(&c->authc_lock, RW_WRITER);
		rw_exit(&exi->exi_cache_lock);

		p = (struct auth_cache *)avl_find(&c->authc_tree, &ac, &where);
		if (p == NULL) {
			avl_insert(&c->authc_tree, np, where);
			rw_downgrade(&c->authc_lock);
			p = np;
		} else {
			rw_downgrade(&c->authc_lock);

			cv_destroy(&np->auth_cv);
			mutex_destroy(&np->auth_lock);
			kmem_cache_free(exi_cache_handle, np);
		}
	} else {
		rw_exit(&exi->exi_cache_lock);
	}

	mutex_enter(&p->auth_lock);
	rw_exit(&c->authc_lock);

wait:
	/*
	 * If the entry is in the WAITING state then some other thread is just
	 * retrieving the required info.  The entry was either NEW, or the list
	 * of the client's supplemental groups is going to be changed (either
	 * by this thread, or by some other thread).  We need to wait until the
	 * nfsauth_retrieve() is done.
	 */
	while (p->auth_state == NFS_AUTH_WAITING)
		cv_wait(&p->auth_cv, &p->auth_lock);

	/*
	 * Here the entry cannot be in WAITING or INVALID state.
	 */
	ASSERT(p->auth_state != NFS_AUTH_WAITING);
	ASSERT(p->auth_state != NFS_AUTH_INVALID);

	/*
	 * If the client's list of supplemental groups has changed (or the
	 * list is not initialized yet) we need to (re)allocate it and make
	 * sure the auth_cache entry is (re)retrieved.
	 */
	if (p->auth_clnt_ngids != crgetngroups(cr) ||
	    bcmp(p->auth_clnt_gids, crgetgroups(cr),
	    p->auth_clnt_ngids * sizeof (gid_t)) != 0) {

		/*
		 * If the refresh thread is just working on this entry then
		 * wait for it so we do not modify the list of supplemental
		 * groups in the middle of its processing.
		 */
		if (p->auth_state == NFS_AUTH_REFRESHING) {
			p->auth_state = NFS_AUTH_WAITING;
			goto wait;
		}

		/*
		 * We won't modify (and use) the STALE entries here since they
		 * are already in the refreshq_queue list.  Such entries will
		 * be updated later.
		 */
		if (p->auth_state == NFS_AUTH_STALE) {
			mutex_exit(&p->auth_lock);

			p = NULL;

			goto retrieve;
		}

		p->auth_state = NFS_AUTH_NEW;

		/*
		 * If the number of supplemental groups differs, we need to
		 * reallocate first.
		 */
		if (p->auth_clnt_ngids != crgetngroups(cr)) {
			kmem_free(p->auth_clnt_gids,
			    p->auth_clnt_ngids * sizeof (gid_t));

			p->auth_clnt_ngids = crgetngroups(cr);
			p->auth_clnt_gids = kmem_alloc(
			    p->auth_clnt_ngids * sizeof (gid_t),
			    KM_NOSLEEP | KM_NORMALPRI);

			/*
			 * If we failed to preallocate the memory for
			 * supplemental groups, we won't cache the retrieved
			 * data.
			 */
			if (p->auth_clnt_ngids != 0 &&
			    p->auth_clnt_gids == NULL) {
				p->auth_clnt_ngids = 0;
				mutex_exit(&p->auth_lock);

				p = NULL;

				goto retrieve;
			}
		}

		/*
		 * Fill the client's supplemental groups.
		 */
		bcopy(crgetgroups(cr), p->auth_clnt_gids,
		    p->auth_clnt_ngids * sizeof (gid_t));
	}

	/*
	 * If the cache entry is not valid yet, we need to retrieve the
	 * info ourselves.
	 */
	if (p->auth_state == NFS_AUTH_NEW) {
		bool_t res;
		/*
		 * NFS_AUTH_NEW is the default output auth_state value if we
		 * fail somewhere below.
		 */
		auth_state_t state = NFS_AUTH_NEW;

		p->auth_state = NFS_AUTH_WAITING;
		mutex_exit(&p->auth_lock);
		kmem_free(addr.buf, addr.len);
		addr = p->auth_clnt->authc_addr;

		atomic_inc_uint(&nfsauth_cache_miss);

		res = nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor,
		    &addr, &access, crgetuid(cr), crgetgid(cr),
		    crgetngroups(cr), crgetgroups(cr), &tmpuid, &tmpgid,
		    &tmpngids, &tmpgids);

		p->auth_access = access;
		p->auth_time = p->auth_freshness = gethrestime_sec();

		if (res == TRUE) {
			if (uid != NULL)
				*uid = tmpuid;
			if (gid != NULL)
				*gid = tmpgid;
			if (ngids != NULL && gids != NULL) {
				*ngids = tmpngids;
				*gids = tmpgids;

				/*
				 * We need a copy of gids for the
				 * auth_cache entry
				 */
				tmpgids = kmem_alloc(tmpngids * sizeof (gid_t),
				    KM_NOSLEEP | KM_NORMALPRI);
				if (tmpgids != NULL)
					bcopy(*gids, tmpgids,
					    tmpngids * sizeof (gid_t));
			}

			if (tmpgids != NULL || tmpngids == 0) {
				p->auth_srv_uid = tmpuid;
				p->auth_srv_gid = tmpgid;
				p->auth_srv_ngids = tmpngids;
				p->auth_srv_gids = tmpgids;

				state = NFS_AUTH_FRESH;
			}
		}

		/*
		 * Set the auth_state and notify waiters.
		 */
		mutex_enter(&p->auth_lock);
		p->auth_state = state;
		cv_broadcast(&p->auth_cv);
		mutex_exit(&p->auth_lock);
	} else {
		uint_t nach;
		time_t refresh;

		refresh = gethrestime_sec() - p->auth_freshness;

		p->auth_time = gethrestime_sec();

		if (uid != NULL)
			*uid = p->auth_srv_uid;
		if (gid != NULL)
			*gid = p->auth_srv_gid;
		if (ngids != NULL && gids != NULL) {
			*ngids = p->auth_srv_ngids;
			*gids = kmem_alloc(*ngids * sizeof (gid_t), KM_SLEEP);
			bcopy(p->auth_srv_gids, *gids, *ngids * sizeof (gid_t));
		}

		access = p->auth_access;

		if ((refresh > NFSAUTH_CACHE_REFRESH) &&
		    p->auth_state == NFS_AUTH_FRESH) {
			refreshq_auth_node_t *ran;
			uint_t nacr;

			p->auth_state = NFS_AUTH_STALE;
			mutex_exit(&p->auth_lock);

			nacr = atomic_inc_uint_nv(&nfsauth_cache_refresh);
			DTRACE_PROBE3(nfsauth__debug__cache__stale,
			    struct exportinfo *, exi,
			    struct auth_cache *, p,
			    uint_t, nacr);

			ran = kmem_alloc(sizeof (refreshq_auth_node_t),
			    KM_SLEEP);
			ran->ran_auth = p;
			ran->ran_netid = strdup(svc_getnetid(req->rq_xprt));

			mutex_enter(&refreshq_lock);
			/*
			 * We should not add a work queue
			 * item if the thread is not
			 * accepting them.
			 */
			if (refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
				refreshq_exi_node_t *ren;

				/*
				 * Is there an existing exi_list?
				 */
				for (ren = list_head(&refreshq_queue);
				    ren != NULL;
				    ren = list_next(&refreshq_queue, ren)) {
					if (ren->ren_exi == exi) {
						list_insert_tail(
						    &ren->ren_authlist, ran);
						break;
					}
				}

				if (ren == NULL) {
					ren = kmem_alloc(
					    sizeof (refreshq_exi_node_t),
					    KM_SLEEP);

					exi_hold(exi);
					ren->ren_exi = exi;

					list_create(&ren->ren_authlist,
					    sizeof (refreshq_auth_node_t),
					    offsetof(refreshq_auth_node_t,
					    ran_node));

					list_insert_tail(&ren->ren_authlist,
					    ran);
					list_insert_tail(&refreshq_queue, ren);
				}

				cv_broadcast(&refreshq_cv);
			} else {
				strfree(ran->ran_netid);
				kmem_free(ran, sizeof (refreshq_auth_node_t));
			}

			mutex_exit(&refreshq_lock);
		} else {
			mutex_exit(&p->auth_lock);
		}

		nach = atomic_inc_uint_nv(&nfsauth_cache_hit);
		DTRACE_PROBE2(nfsauth__debug__cache__hit,
		    uint_t, nach,
		    time_t, refresh);

		kmem_free(addr.buf, addr.len);
	}

	return (access);

retrieve:
	/*
	 * Retrieve the required data without caching.
	 */

	ASSERT(p == NULL);

	atomic_inc_uint(&nfsauth_cache_miss);

	if (nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor, &addr,
	    &access, crgetuid(cr), crgetgid(cr), crgetngroups(cr),
	    crgetgroups(cr), &tmpuid, &tmpgid, &tmpngids, &tmpgids)) {
		if (uid != NULL)
			*uid = tmpuid;
		if (gid != NULL)
			*gid = tmpgid;
		if (ngids != NULL && gids != NULL) {
			*ngids = tmpngids;
			*gids = tmpgids;
		} else {
			kmem_free(tmpgids, tmpngids * sizeof (gid_t));
		}
	}

	kmem_free(addr.buf, addr.len);

	return (access);
}

/*
 * Check if the requesting client has access to the filesystem with
 * a given nfs flavor number which is an explicitly shared flavor.
 */
int
nfsauth4_secinfo_access(struct exportinfo *exi, struct svc_req *req,
    int flavor, int perm, cred_t *cr)
{
	int access;

	if (!(perm & M_4SEC_EXPORTED)) {
		return (NFSAUTH_DENIED);
	}

	/*
	 * Optimize if there are no lists
	 */
	if ((perm & (M_ROOT | M_NONE | M_MAP)) == 0) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (NFSAUTH_RO);
		if (perm == M_RW)
			return (NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor, cr, NULL, NULL, NULL,
	    NULL);

	return (access);
}

int
nfsauth_access(struct exportinfo *exi, struct svc_req *req, cred_t *cr,
    uid_t *uid, gid_t *gid, uint_t *ngids, gid_t **gids)
{
	int access, mapaccess;
	struct secinfo *sp;
	int i, flavor, perm;
	int authnone_entry = -1;

	/*
	 * By default root is mapped to the anonymous user.
	 * This might get overridden later in nfsauth_cache_get().
	 */
	if (crgetuid(cr) == 0) {
		if (uid != NULL)
			*uid = exi->exi_export.ex_anon;
		if (gid != NULL)
			*gid = exi->exi_export.ex_anon;
	} else {
		if (uid != NULL)
			*uid = crgetuid(cr);
		if (gid != NULL)
			*gid = crgetgid(cr);
	}

	if (ngids != NULL)
		*ngids = 0;
	if (gids != NULL)
		*gids = NULL;

	/*
	 * Get the nfs flavor number from xprt.
	 */
	flavor = (int)(uintptr_t)req->rq_xprt->xp_cookie;

	/*
	 * First check the access restrictions on the filesystem.  If
	 * there are no lists associated with this flavor then there's no
	 * need to make an expensive call to the nfsauth service or to
	 * cache anything.
	 */

	sp = exi->exi_export.ex_secinfo;
	for (i = 0; i < exi->exi_export.ex_seccnt; i++) {
		if (flavor != sp[i].s_secinfo.sc_nfsnum) {
			if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE)
				authnone_entry = i;
			continue;
		}
		break;
	}

	mapaccess = 0;

	if (i >= exi->exi_export.ex_seccnt) {
		/*
		 * Flavor not found, but use AUTH_NONE if it exists
		 */
		if (authnone_entry == -1)
			return (NFSAUTH_DENIED);
		flavor = AUTH_NONE;
		mapaccess = NFSAUTH_MAPNONE;
		i = authnone_entry;
	}

	/*
	 * If the flavor is in the ex_secinfo list, but was not explicitly
	 * shared by the user, it is a result of the nfsv4 server
	 * namespace setup.  We will grant RO permission, similar to
	 * a pseudo node, except that this node is a shared one.
	 *
	 * e.g. a flavor shown in (parentheses) indicates that it is not
	 *	explicitly shared by the user:
	 *
	 *		/	(sys, krb5)
	 *		|
	 *		export  #share -o sec=sys (krb5)
	 *		|
	 *		secure  #share -o sec=krb5
	 *
	 *	In this case, when a krb5 request comes in to access
	 *	/export, RO permission is granted.
	 */
	if (!(sp[i].s_flags & M_4SEC_EXPORTED))
		return (mapaccess | NFSAUTH_RO);

	/*
	 * Optimize if there are no lists.
	 * We cannot optimize for AUTH_SYS with NGRPS (16) supplemental groups.
	 */
	perm = sp[i].s_flags;
	if ((perm & (M_ROOT | M_NONE | M_MAP)) == 0 && (ngroups_max <= NGRPS ||
	    flavor != AUTH_SYS || crgetngroups(cr) < NGRPS)) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (mapaccess | NFSAUTH_RO);
		if (perm == M_RW)
			return (mapaccess | NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor, cr, uid, gid, ngids, gids);

	/*
	 * For both NFSAUTH_DENIED and NFSAUTH_WRONGSEC we do not care about
	 * the supplemental groups.
	 */
	if (access & NFSAUTH_DENIED || access & NFSAUTH_WRONGSEC) {
		if (ngids != NULL && gids != NULL) {
			kmem_free(*gids, *ngids * sizeof (gid_t));
			*ngids = 0;
			*gids = NULL;
		}
	}

	/*
	 * Client's security flavor doesn't match with "ro" or
	 * "rw" list. Try again using AUTH_NONE if present.
	 */
	if ((access & NFSAUTH_WRONGSEC) && (flavor != AUTH_NONE)) {
		/*
		 * Have we already encountered AUTH_NONE ?
		 */
		if (authnone_entry != -1) {
			mapaccess = NFSAUTH_MAPNONE;
			access = nfsauth_cache_get(exi, req, AUTH_NONE, cr,
			    NULL, NULL, NULL, NULL);
		} else {
			/*
			 * Check for AUTH_NONE presence.
			 */
			for (; i < exi->exi_export.ex_seccnt; i++) {
				if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE) {
					mapaccess = NFSAUTH_MAPNONE;
					access = nfsauth_cache_get(exi, req,
					    AUTH_NONE, cr, NULL, NULL, NULL,
					    NULL);
					break;
				}
			}
		}
	}

	if (access & NFSAUTH_DENIED)
		access = NFSAUTH_DENIED;

	return (access | mapaccess);
}

static void
nfsauth_free_clnt_node(struct auth_cache_clnt *p)
{
	void *cookie = NULL;
	struct auth_cache *node;

	while ((node = avl_destroy_nodes(&p->authc_tree, &cookie)) != NULL)
		nfsauth_free_node(node);
	avl_destroy(&p->authc_tree);

	kmem_free(p->authc_addr.buf, p->authc_addr.len);
	rw_destroy(&p->authc_lock);

	kmem_free(p, sizeof (*p));
}

static void
nfsauth_free_node(struct auth_cache *p)
{
	kmem_free(p->auth_clnt_gids, p->auth_clnt_ngids * sizeof (gid_t));
	kmem_free(p->auth_srv_gids, p->auth_srv_ngids * sizeof (gid_t));
	mutex_destroy(&p->auth_lock);
	cv_destroy(&p->auth_cv);
	kmem_cache_free(exi_cache_handle, p);
}

/*
 * Free the nfsauth cache for a given export
 */
void
nfsauth_cache_free(struct exportinfo *exi)
{
	int i;

	/*
	 * The only way we got here was with an exi_rele, which means that no
	 * auth cache entry is being refreshed.
	 */

	for (i = 0; i < AUTH_TABLESIZE; i++) {
		avl_tree_t *tree = exi->exi_cache[i];
		void *cookie = NULL;
		struct auth_cache_clnt *node;

		while ((node = avl_destroy_nodes(tree, &cookie)) != NULL)
			nfsauth_free_clnt_node(node);
	}
}

/*
 * Called by the kernel memory allocator when
 * memory is low. Free unused cache entries.
 * If that's not enough, the VM system will
 * call again for some more.
 */
/*ARGSUSED*/
void
exi_cache_reclaim(void *cdrarg)
{
	int i;
	struct exportinfo *exi;

	rw_enter(&exported_lock, RW_READER);

	for (i = 0; i < EXPTABLESIZE; i++) {
		for (exi = exptable[i]; exi; exi = exi->fid_hash.next) {
			exi_cache_trim(exi);
		}
	}

	rw_exit(&exported_lock);

	atomic_inc_uint(&nfsauth_cache_reclaim);
}

void
exi_cache_trim(struct exportinfo *exi)
{
	struct auth_cache_clnt *c;
	struct auth_cache_clnt *nextc;
	struct auth_cache *p;
	struct auth_cache *next;
	int i;
	time_t stale_time;
	avl_tree_t *tree;

	for (i = 0; i < AUTH_TABLESIZE; i++) {

		tree = exi->exi_cache[i];
		stale_time = gethrestime_sec() - NFSAUTH_CACHE_TRIM;

		rw_enter(&exi->exi_cache_lock, RW_READER);

		/*
		 * Free entries that have not been
		 * used for NFSAUTH_CACHE_TRIM seconds.
		 */
		for (c = avl_first(tree); c != NULL; c = AVL_NEXT(tree, c)) {
			rw_enter(&c->authc_lock, RW_WRITER);
			for (p = avl_first(&c->authc_tree); p != NULL;
			    p = next) {
				next = AVL_NEXT(&c->authc_tree, p);

				ASSERT(p->auth_state != NFS_AUTH_INVALID);

				mutex_enter(&p->auth_lock);

				/*
				 * We won't trim recently used and/or WAITING
				 * entries.
				 */
				if (p->auth_time > stale_time ||
				    p->auth_state == NFS_AUTH_WAITING) {
					mutex_exit(&p->auth_lock);
					continue;
				}

				DTRACE_PROBE1(nfsauth__debug__trim__state,
				    auth_state_t, p->auth_state);

				/*
				 * STALE and REFRESHING entries need to be
				 * marked INVALID only because they are
				 * referenced by some other structures or
				 * threads.  They will be freed later.
				 */
				if (p->auth_state == NFS_AUTH_STALE ||
				    p->auth_state == NFS_AUTH_REFRESHING) {
					p->auth_state = NFS_AUTH_INVALID;
					mutex_exit(&p->auth_lock);

					avl_remove(&c->authc_tree, p);
				} else {
					mutex_exit(&p->auth_lock);

					avl_remove(&c->authc_tree, p);
					nfsauth_free_node(p);
				}
			}
			rw_exit(&c->authc_lock);
		}

		if (rw_tryupgrade(&exi->exi_cache_lock) == 0) {
			rw_exit(&exi->exi_cache_lock);
			rw_enter(&exi->exi_cache_lock, RW_WRITER);
		}

		for (c = avl_first(tree); c != NULL; c = nextc) {
			nextc = AVL_NEXT(tree, c);

			if (avl_is_empty(&c->authc_tree) == B_FALSE)
				continue;

			avl_remove(tree, c);

			nfsauth_free_clnt_node(c);
		}

		rw_exit(&exi->exi_cache_lock);
	}
}
1581