xref: /illumos-gate/usr/src/uts/common/fs/tmpfs/tmp_vnops.c (revision be4e997e05c92f444c81d2d197b79e67ebee2786)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2015, Joyent, Inc. All rights reserved.
29  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
30  * Copyright 2016 RackTop Systems.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/t_lock.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/user.h>
39 #include <sys/time.h>
40 #include <sys/vfs.h>
41 #include <sys/vfs_opreg.h>
42 #include <sys/vnode.h>
43 #include <sys/file.h>
44 #include <sys/fcntl.h>
45 #include <sys/flock.h>
46 #include <sys/kmem.h>
47 #include <sys/uio.h>
48 #include <sys/errno.h>
49 #include <sys/stat.h>
50 #include <sys/cred.h>
51 #include <sys/dirent.h>
52 #include <sys/pathname.h>
53 #include <sys/vmsystm.h>
54 #include <sys/fs/tmp.h>
55 #include <sys/fs/tmpnode.h>
56 #include <sys/mman.h>
57 #include <vm/hat.h>
58 #include <vm/seg_vn.h>
59 #include <vm/seg_map.h>
60 #include <vm/seg.h>
61 #include <vm/anon.h>
62 #include <vm/as.h>
63 #include <vm/page.h>
64 #include <vm/pvn.h>
65 #include <sys/cmn_err.h>
66 #include <sys/debug.h>
67 #include <sys/swap.h>
68 #include <sys/buf.h>
69 #include <sys/vm.h>
70 #include <sys/vtrace.h>
71 #include <sys/policy.h>
72 #include <fs/fs_subr.h>
73 
74 static int	tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
75 	page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
76 static int 	tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
77 	int, struct cred *);
78 
79 /* ARGSUSED1 */
80 static int
81 tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
82 {
83 	/*
84 	 * swapon to a tmpfs file is not supported, so access
85 	 * is denied on open if VISSWAP is set.
86 	 */
87 	if ((*vpp)->v_flag & VISSWAP)
88 		return (EINVAL);
89 	return (0);
90 }
91 
92 /* ARGSUSED1 */
93 static int
94 tmp_close(
95 	struct vnode *vp,
96 	int flag,
97 	int count,
98 	offset_t offset,
99 	struct cred *cred,
100 	caller_context_t *ct)
101 {
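	/*
	 * Release any record locks and share reservations held on this
	 * vnode by the calling process; tmpfs keeps no other per-open
	 * state that needs to be torn down here.
	 */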
102 	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
103 	cleanshares(vp, ttoproc(curthread)->p_pid);
104 	return (0);
105 }
106 
107 /*
108  * wrtmp does the real work of write requests for tmpfs.
109  */
110 static int
111 wrtmp(
112 	struct tmount *tm,
113 	struct tmpnode *tp,
114 	struct uio *uio,
115 	struct cred *cr,
116 	struct caller_context *ct)
117 {
118 	pgcnt_t pageoffset;	/* byte offset into the current page */
119 	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
120 	caddr_t base;		/* base of segmap */
121 	ssize_t bytes;		/* bytes to uiomove */
122 	pfn_t pagenumber;	/* offset in pages into tmp file */
123 	struct vnode *vp;
124 	int error = 0;
125 	int	pagecreate;	/* == 1 if we allocated a page */
126 	int	newpage;
127 	rlim64_t limit = uio->uio_llimit;
128 	long oresid = uio->uio_resid;
129 	timestruc_t now;
130 
131 	long tn_size_changed = 0;
132 	long old_tn_size;
133 	long new_tn_size;
134 
135 	vp = TNTOV(tp);
136 	ASSERT(vp->v_type == VREG);
137 
138 	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
139 	    "tmp_wrtmp_start:vp %p", vp);
140 
141 	ASSERT(RW_WRITE_HELD(&tp->tn_contents));
142 	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
143 
144 	if (MANDLOCK(vp, tp->tn_mode)) {
145 		rw_exit(&tp->tn_contents);
146 		/*
147 		 * tmp_getattr ends up being called by chklock
148 		 */
149 		error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
150 		    uio->uio_fmode, ct);
151 		rw_enter(&tp->tn_contents, RW_WRITER);
152 		if (error != 0) {
153 			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
154 			    "tmp_wrtmp_end:vp %p error %d", vp, error);
155 			return (error);
156 		}
157 	}
158 
159 	if (uio->uio_loffset < 0)
160 		return (EINVAL);
161 
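	/*
	 * Enforce the process file-size limit (RLIMIT_FSIZE, passed in as
	 * uio_llimit) and the tmpfs offset limit of MAXOFF_T; a write that
	 * starts at or beyond either limit fails with EFBIG, and for the
	 * resource limit SIGXFSZ is also posted via the rctl framework.
	 */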
162 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
163 		limit = MAXOFFSET_T;
164 
165 	if (uio->uio_loffset >= limit) {
166 		proc_t *p = ttoproc(curthread);
167 
168 		mutex_enter(&p->p_lock);
169 		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
170 		    p, RCA_UNSAFE_SIGINFO);
171 		mutex_exit(&p->p_lock);
172 		return (EFBIG);
173 	}
174 
175 	if (uio->uio_loffset >= MAXOFF_T) {
176 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
177 		    "tmp_wrtmp_end:vp %p error %d", vp, EINVAL);
178 		return (EFBIG);
179 	}
180 
181 	if (uio->uio_resid == 0) {
182 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
183 		    "tmp_wrtmp_end:vp %p error %d", vp, 0);
184 		return (0);
185 	}
186 
187 	if (limit > MAXOFF_T)
188 		limit = MAXOFF_T;
189 
190 	do {
191 		long	offset;
192 		long	delta;
193 
194 		offset = (long)uio->uio_offset;
195 		pageoffset = offset & PAGEOFFSET;
196 		/*
197 		 * A maximum of PAGESIZE bytes of data is transferred
198 		 * each pass through this loop
199 		 */
200 		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
201 
202 		if (offset + bytes >= limit) {
203 			if (offset >= limit) {
204 				error = EFBIG;
205 				goto out;
206 			}
207 			bytes = limit - offset;
208 		}
209 		pagenumber = btop(offset);
210 
211 		/*
212 		 * delta is the amount of anonymous memory
213 		 * to reserve for the file.
214 		 * We always reserve in pagesize increments so
215 		 * unless we're extending the file into a new page,
216 		 * we don't need to call tmp_resv.
217 		 */
218 		delta = offset + bytes -
219 		    P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
220 		if (delta > 0) {
221 			pagecreate = 1;
222 			if (tmp_resv(tm, tp, delta, pagecreate)) {
223 				/*
224 				 * Log file system full in the zone that owns
225 				 * the tmpfs mount, as well as in the global
226 				 * zone if necessary.
227 				 */
228 				zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
229 				    CE_WARN, "%s: File system full, "
230 				    "swap space limit exceeded",
231 				    tm->tm_mntpath);
232 
233 				if (tm->tm_vfsp->vfs_zone->zone_id !=
234 				    GLOBAL_ZONEID) {
235 
236 					vfs_t *vfs = tm->tm_vfsp;
237 
238 					zcmn_err(GLOBAL_ZONEID,
239 					    CE_WARN, "%s: File system full, "
240 					    "swap space limit exceeded",
241 					    vfs->vfs_vnodecovered->v_path);
242 				}
243 				error = ENOSPC;
244 				break;
245 			}
246 			tmpnode_growmap(tp, (ulong_t)offset + bytes);
247 		}
248 		/* grow the file to the new length */
249 		if (offset + bytes > tp->tn_size) {
250 			tn_size_changed = 1;
251 			old_tn_size = tp->tn_size;
252 			/*
253 			 * Postpone updating tp->tn_size until uiomove() is
254 			 * done.
255 			 */
256 			new_tn_size = offset + bytes;
257 		}
258 		if (bytes == PAGESIZE) {
259 			/*
260 			 * Writing whole page so reading from disk
261 			 * is a waste
262 			 */
263 			pagecreate = 1;
264 		} else {
265 			pagecreate = 0;
266 		}
267 		/*
268 		 * If writing past EOF or filling in a hole
269 		 * we need to allocate an anon slot.
270 		 */
271 		if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
272 			(void) anon_set_ptr(tp->tn_anon, pagenumber,
273 			    anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
274 			pagecreate = 1;
275 			tp->tn_nblocks++;
276 		}
277 
278 		/*
279 		 * We have to drop the contents lock to allow the VM
280 		 * system to reacquire it in tmp_getpage()
281 		 */
282 		rw_exit(&tp->tn_contents);
283 
284 		/*
285 		 * Touch the page and fault it in if it is not in core
286 		 * before segmap_getmapflt or vpm_data_copy can lock it.
287 		 * This is to avoid the deadlock if the buffer is mapped
288 		 * to the same file through mmap which we want to write.
289 		 */
290 		uio_prefaultpages((long)bytes, uio);
291 
292 		newpage = 0;
293 		if (vpm_enable) {
294 			/*
295 			 * Copy data. If new pages are created, part of
296 			 * the page that is not written will be initialized
297 			 * with zeros.
298 			 */
299 			error = vpm_data_copy(vp, offset, bytes, uio,
300 			    !pagecreate, &newpage, 1, S_WRITE);
301 		} else {
302 			/* Get offset within the segmap mapping */
303 			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
304 			base = segmap_getmapflt(segkmap, vp,
305 			    (offset &  MAXBMASK), PAGESIZE, !pagecreate,
306 			    S_WRITE);
307 		}
308 
309 
310 		if (!vpm_enable && pagecreate) {
311 			/*
312 			 * segmap_pagecreate() returns 1 if it calls
313 			 * page_create_va() to allocate any pages.
314 			 */
315 			newpage = segmap_pagecreate(segkmap,
316 			    base + segmap_offset, (size_t)PAGESIZE, 0);
317 			/*
318 			 * Clear from the beginning of the page to the starting
319 			 * offset of the data.
320 			 */
321 			if (pageoffset != 0)
322 				(void) kzero(base + segmap_offset,
323 				    (size_t)pageoffset);
324 		}
325 
326 		if (!vpm_enable) {
327 			error = uiomove(base + segmap_offset + pageoffset,
328 			    (long)bytes, UIO_WRITE, uio);
329 		}
330 
331 		if (!vpm_enable && pagecreate &&
332 		    uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
333 			long	zoffset; /* zero from offset into page */
334 			/*
335 			 * We created pages w/o initializing them completely,
336 			 * thus we need to zero the part that wasn't set up.
337 			 * This happens on most EOF write cases and if
338 			 * we had some sort of error during the uiomove.
339 			 */
340 			long nmoved;
341 
342 			nmoved = uio->uio_offset - offset;
343 			ASSERT((nmoved + pageoffset) <= PAGESIZE);
344 
345 			/*
346 			 * Zero from the end of data in the page to the
347 			 * end of the page.
348 			 */
349 			if ((zoffset = pageoffset + nmoved) < PAGESIZE)
350 				(void) kzero(base + segmap_offset + zoffset,
351 				    (size_t)PAGESIZE - zoffset);
352 		}
353 
354 		/*
355 		 * Unlock the pages which have been allocated by
356 		 * page_create_va() in segmap_pagecreate()
357 		 */
358 		if (!vpm_enable && newpage) {
359 			segmap_pageunlock(segkmap, base + segmap_offset,
360 			    (size_t)PAGESIZE, S_WRITE);
361 		}
362 
363 		if (error) {
364 			/*
365 			 * If we failed on a write, we must
366 			 * be sure to invalidate any pages that may have
367 			 * been allocated.
368 			 */
369 			if (vpm_enable) {
370 				(void) vpm_sync_pages(vp, offset, PAGESIZE,
371 				    SM_INVAL);
372 			} else {
373 				(void) segmap_release(segkmap, base, SM_INVAL);
374 			}
375 		} else {
376 			if (vpm_enable) {
377 				error = vpm_sync_pages(vp, offset, PAGESIZE,
378 				    0);
379 			} else {
380 				error = segmap_release(segkmap, base, 0);
381 			}
382 		}
383 
384 		/*
385 		 * Re-acquire contents lock.
386 		 */
387 		rw_enter(&tp->tn_contents, RW_WRITER);
388 
389 		/*
390 		 * Update tn_size.
391 		 */
392 		if (tn_size_changed)
393 			tp->tn_size = new_tn_size;
394 
395 		/*
396 		 * If the uiomove failed, fix up tn_size.
397 		 */
398 		if (error) {
399 			if (tn_size_changed) {
400 				/*
401 				 * The uiomove failed, and we
402 				 * allocated blocks, so get rid
403 				 * of them.
404 				 */
405 				(void) tmpnode_trunc(tm, tp,
406 				    (ulong_t)old_tn_size);
407 			}
408 		} else {
409 			/*
410 			 * XXX - Can this be out of the loop?
411 			 */
412 			if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
413 			    (tp->tn_mode & (S_ISUID | S_ISGID)) &&
414 			    secpolicy_vnode_setid_retain(cr,
415 			    (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
416 				/*
417 				 * Clear Set-UID & Set-GID bits on
418 				 * successful write if not privileged
419 				 * and at least one of the execute bits
420 				 * is set.  If we always clear Set-GID,
421 				 * mandatory file and record locking is
422 				 * unusable.
423 				 */
424 				tp->tn_mode &= ~(S_ISUID | S_ISGID);
425 			}
426 			gethrestime(&now);
427 			tp->tn_mtime = now;
428 			tp->tn_ctime = now;
429 		}
430 	} while (error == 0 && uio->uio_resid > 0 && bytes != 0);
431 
432 out:
433 	/*
434 	 * If we've already done a partial-write, terminate
435 	 * the write but return no error.
436 	 */
437 	if (oresid != uio->uio_resid)
438 		error = 0;
439 	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
440 	    "tmp_wrtmp_end:vp %p error %d", vp, error);
441 	return (error);
442 }
443 
444 /*
445  * rdtmp does the real work of read requests for tmpfs.
446  */
447 static int
448 rdtmp(
449 	struct tmount *tm,
450 	struct tmpnode *tp,
451 	struct uio *uio,
452 	struct caller_context *ct)
453 {
454 	ulong_t pageoffset;	/* byte offset into the current page */
455 	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
456 	caddr_t base;		/* base of segmap */
457 	ssize_t bytes;		/* bytes to uiomove */
458 	struct vnode *vp;
459 	int error;
460 	long oresid = uio->uio_resid;
461 
462 #if defined(lint)
463 	tm = tm;
464 #endif
465 	vp = TNTOV(tp);
466 
467 	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
468 	    vp);
469 
470 	ASSERT(RW_LOCK_HELD(&tp->tn_contents));
471 
472 	if (MANDLOCK(vp, tp->tn_mode)) {
473 		rw_exit(&tp->tn_contents);
474 		/*
475 		 * tmp_getattr ends up being called by chklock
476 		 */
477 		error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
478 		    uio->uio_fmode, ct);
479 		rw_enter(&tp->tn_contents, RW_READER);
480 		if (error != 0) {
481 			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
482 			    "tmp_rdtmp_end:vp %p error %d", vp, error);
483 			return (error);
484 		}
485 	}
486 	ASSERT(tp->tn_type == VREG);
487 
488 	if (uio->uio_loffset >= MAXOFF_T) {
489 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
490 		    "tmp_rdtmp_end:vp %p error %d", vp, EINVAL);
491 		return (0);
492 	}
493 	if (uio->uio_loffset < 0)
494 		return (EINVAL);
495 	if (uio->uio_resid == 0) {
496 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
497 		    "tmp_rdtmp_end:vp %p error %d", vp, 0);
498 		return (0);
499 	}
500 
501 	vp = TNTOV(tp);
502 
503 	do {
504 		long diff;
505 		long offset;
506 
507 		offset = uio->uio_offset;
508 		pageoffset = offset & PAGEOFFSET;
509 		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
510 
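		/*
		 * Don't read past end of file; trim the transfer to the
		 * bytes remaining before EOF and stop once it is reached.
		 */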
511 		diff = tp->tn_size - offset;
512 
513 		if (diff <= 0) {
514 			error = 0;
515 			goto out;
516 		}
517 		if (diff < bytes)
518 			bytes = diff;
519 
520 		/*
521 		 * We have to drop the contents lock to allow the VM system
522 		 * to reacquire it in tmp_getpage() should the uiomove cause a
523 		 * pagefault.
524 		 */
525 		rw_exit(&tp->tn_contents);
526 
527 		if (vpm_enable) {
528 			/*
529 			 * Copy data.
530 			 */
531 			error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
532 			    0, S_READ);
533 		} else {
534 			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
535 			base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
536 			    bytes, 1, S_READ);
537 
538 			error = uiomove(base + segmap_offset + pageoffset,
539 			    (long)bytes, UIO_READ, uio);
540 		}
541 
542 		if (error) {
543 			if (vpm_enable) {
544 				(void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
545 			} else {
546 				(void) segmap_release(segkmap, base, 0);
547 			}
548 		} else {
549 			if (vpm_enable) {
550 				error = vpm_sync_pages(vp, offset, PAGESIZE,
551 				    0);
552 			} else {
553 				error = segmap_release(segkmap, base, 0);
554 			}
555 		}
556 
557 		/*
558 		 * Re-acquire contents lock.
559 		 */
560 		rw_enter(&tp->tn_contents, RW_READER);
561 
562 	} while (error == 0 && uio->uio_resid > 0);
563 
564 out:
565 	gethrestime(&tp->tn_atime);
566 
567 	/*
568 	 * If we've already done a partial read, terminate
569 	 * the read but return no error.
570 	 */
571 	if (oresid != uio->uio_resid)
572 		error = 0;
573 
574 	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
575 	    "tmp_rdtmp_end:vp %p error %d", vp, error);
576 	return (error);
577 }
578 
579 /* ARGSUSED2 */
580 static int
581 tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
582     struct caller_context *ct)
583 {
584 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
585 	struct tmount *tm = (struct tmount *)VTOTM(vp);
586 	int error;
587 
588 	/*
589 	 * We don't currently support reading non-regular files
590 	 */
591 	if (vp->v_type == VDIR)
592 		return (EISDIR);
593 	if (vp->v_type != VREG)
594 		return (EINVAL);
595 	/*
596 	 * tmp_rwlock should have already been called from layers above
597 	 */
598 	ASSERT(RW_READ_HELD(&tp->tn_rwlock));
599 
600 	rw_enter(&tp->tn_contents, RW_READER);
601 
602 	error = rdtmp(tm, tp, uiop, ct);
603 
604 	rw_exit(&tp->tn_contents);
605 
606 	return (error);
607 }
608 
609 static int
610 tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
611     struct caller_context *ct)
612 {
613 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
614 	struct tmount *tm = (struct tmount *)VTOTM(vp);
615 	int error;
616 
617 	/*
618 	 * We don't currently support writing to non-regular files
619 	 */
620 	if (vp->v_type != VREG)
621 		return (EINVAL);	/* XXX EISDIR? */
622 
623 	/*
624 	 * tmp_rwlock should have already been called from layers above
625 	 */
626 	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
627 
628 	rw_enter(&tp->tn_contents, RW_WRITER);
629 
630 	if (ioflag & FAPPEND) {
631 		/*
632 		 * In append mode start at end of file.
633 		 */
634 		uiop->uio_loffset = tp->tn_size;
635 	}
636 
637 	error = wrtmp(tm, tp, uiop, cred, ct);
638 
639 	rw_exit(&tp->tn_contents);
640 
641 	return (error);
642 }
643 
644 /* ARGSUSED */
645 static int
646 tmp_ioctl(
647 	struct vnode *vp,
648 	int com,
649 	intptr_t data,
650 	int flag,
651 	struct cred *cred,
652 	int *rvalp,
653 	caller_context_t *ct)
654 {
655 	return (ENOTTY);
656 }
657 
658 /* ARGSUSED2 */
659 static int
660 tmp_getattr(
661 	struct vnode *vp,
662 	struct vattr *vap,
663 	int flags,
664 	struct cred *cred,
665 	caller_context_t *ct)
666 {
667 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
668 	struct vnode *mvp;
669 	struct vattr va;
670 	int attrs = 1;
671 
672 	/*
673 	 * A special case to handle the root tnode on a diskless nfs
674 	 * client that may have had its uid and gid inherited
675 	 * from an nfs vnode with nobody ownership.  Likely the
676 	 * root filesystem. After nfs is fully functional the uid/gid
677 	 * may be mappable, so ask again.
678 	 * vfsp can't get unmounted because we hold vp.
679 	 */
680 	if (vp->v_flag & VROOT &&
681 	    (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
682 		mutex_enter(&tp->tn_tlock);
683 		if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
684 			mutex_exit(&tp->tn_tlock);
685 			bzero(&va, sizeof (struct vattr));
686 			va.va_mask = AT_UID|AT_GID;
687 			attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
688 		} else {
689 			mutex_exit(&tp->tn_tlock);
690 		}
691 	}
692 	mutex_enter(&tp->tn_tlock);
693 	if (attrs == 0) {
694 		tp->tn_uid = va.va_uid;
695 		tp->tn_gid = va.va_gid;
696 	}
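	/* Copy the cached attributes out of the tmpnode. */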
697 	vap->va_type = vp->v_type;
698 	vap->va_mode = tp->tn_mode & MODEMASK;
699 	vap->va_uid = tp->tn_uid;
700 	vap->va_gid = tp->tn_gid;
701 	vap->va_fsid = tp->tn_fsid;
702 	vap->va_nodeid = (ino64_t)tp->tn_nodeid;
703 	vap->va_nlink = tp->tn_nlink;
704 	vap->va_size = (u_offset_t)tp->tn_size;
705 	vap->va_atime = tp->tn_atime;
706 	vap->va_mtime = tp->tn_mtime;
707 	vap->va_ctime = tp->tn_ctime;
708 	vap->va_blksize = PAGESIZE;
709 	vap->va_rdev = tp->tn_rdev;
710 	vap->va_seq = tp->tn_seq;
711 
712 	/*
713 	 * XXX Holes are not taken into account.  We could take the time to
714 	 * run through the anon array looking for allocated slots...
715 	 */
716 	vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
717 	mutex_exit(&tp->tn_tlock);
718 	return (0);
719 }
720 
721 /*ARGSUSED4*/
722 static int
723 tmp_setattr(
724 	struct vnode *vp,
725 	struct vattr *vap,
726 	int flags,
727 	struct cred *cred,
728 	caller_context_t *ct)
729 {
730 	struct tmount *tm = (struct tmount *)VTOTM(vp);
731 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
732 	int error = 0;
733 	struct vattr *get;
734 	long mask;
735 
736 	/*
737 	 * Cannot set these attributes
738 	 */
739 	if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
740 		return (EINVAL);
741 
742 	mutex_enter(&tp->tn_tlock);
743 
744 	get = &tp->tn_attr;
745 	/*
746 	 * Change file access modes. Must be owner or have sufficient
747 	 * privileges.
748 	 */
749 	error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
750 	    tp);
751 
752 	if (error)
753 		goto out;
754 
755 	mask = vap->va_mask;
756 
757 	if (mask & AT_MODE) {
758 		get->va_mode &= S_IFMT;
759 		get->va_mode |= vap->va_mode & ~S_IFMT;
760 	}
761 
762 	if (mask & AT_UID)
763 		get->va_uid = vap->va_uid;
764 	if (mask & AT_GID)
765 		get->va_gid = vap->va_gid;
766 	if (mask & AT_ATIME)
767 		get->va_atime = vap->va_atime;
768 	if (mask & AT_MTIME)
769 		get->va_mtime = vap->va_mtime;
770 
771 	if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
772 		gethrestime(&tp->tn_ctime);
773 
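	/*
	 * A size change goes through tmpnode_trunc(); it is done under the
	 * tmpnode's rwlock and contents locks rather than tn_tlock, and a
	 * truncate vnevent is generated when the new size is zero.
	 */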
774 	if (mask & AT_SIZE) {
775 		ASSERT(vp->v_type != VDIR);
776 
777 		/* Don't support large files. */
778 		if (vap->va_size > MAXOFF_T) {
779 			error = EFBIG;
780 			goto out;
781 		}
782 		mutex_exit(&tp->tn_tlock);
783 
784 		rw_enter(&tp->tn_rwlock, RW_WRITER);
785 		rw_enter(&tp->tn_contents, RW_WRITER);
786 		error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
787 		rw_exit(&tp->tn_contents);
788 		rw_exit(&tp->tn_rwlock);
789 
790 		if (error == 0 && vap->va_size == 0)
791 			vnevent_truncate(vp, ct);
792 
793 		goto out1;
794 	}
795 out:
796 	mutex_exit(&tp->tn_tlock);
797 out1:
798 	return (error);
799 }
800 
801 /* ARGSUSED2 */
802 static int
803 tmp_access(
804 	struct vnode *vp,
805 	int mode,
806 	int flags,
807 	struct cred *cred,
808 	caller_context_t *ct)
809 {
810 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
811 	int error;
812 
813 	mutex_enter(&tp->tn_tlock);
814 	error = tmp_taccess(tp, mode, cred);
815 	mutex_exit(&tp->tn_tlock);
816 	return (error);
817 }
818 
819 /* ARGSUSED3 */
820 static int
821 tmp_lookup(
822 	struct vnode *dvp,
823 	char *nm,
824 	struct vnode **vpp,
825 	struct pathname *pnp,
826 	int flags,
827 	struct vnode *rdir,
828 	struct cred *cred,
829 	caller_context_t *ct,
830 	int *direntflags,
831 	pathname_t *realpnp)
832 {
833 	struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
834 	struct tmpnode *ntp = NULL;
835 	int error;
836 
837 
838 	/* allow cd into @ dir */
839 	if (flags & LOOKUP_XATTR) {
840 		struct tmpnode *xdp;
841 		struct tmount *tm;
842 
843 		/*
844 		 * don't allow attributes unless mounted with XATTR support
845 		 */
846 		if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
847 			return (EINVAL);
848 
849 		if (tp->tn_flags & ISXATTR)
850 			/* No attributes on attributes */
851 			return (EINVAL);
852 
853 		rw_enter(&tp->tn_rwlock, RW_WRITER);
854 		if (tp->tn_xattrdp == NULL) {
855 			if (!(flags & CREATE_XATTR_DIR)) {
856 				rw_exit(&tp->tn_rwlock);
857 				return (ENOENT);
858 			}
859 
860 			/*
861 			 * No attribute directory exists for this
862 			 * node - create the attr dir as a side effect
863 			 * of this lookup.
864 			 */
865 
866 			/*
867 			 * Make sure we have adequate permission...
868 			 */
869 
870 			if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
871 				rw_exit(&tp->tn_rwlock);
872 				return (error);
873 			}
874 
875 			xdp = tmp_memalloc(sizeof (struct tmpnode),
876 			    TMP_MUSTHAVE);
877 			tm = VTOTM(dvp);
878 			tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
879 			/*
880 			 * Fix-up fields unique to attribute directories.
881 			 */
882 			xdp->tn_flags = ISXATTR;
883 			xdp->tn_type = VDIR;
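			/*
			 * Directories pass their mode through to the new
			 * attribute directory; for other files, group and
			 * other get read/search access on it only if they
			 * can read the file itself.
			 */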
884 			if (tp->tn_type == VDIR) {
885 				xdp->tn_mode = tp->tn_attr.va_mode;
886 			} else {
887 				xdp->tn_mode = 0700;
888 				if (tp->tn_attr.va_mode & 0040)
889 					xdp->tn_mode |= 0750;
890 				if (tp->tn_attr.va_mode & 0004)
891 					xdp->tn_mode |= 0705;
892 			}
893 			xdp->tn_vnode->v_type = VDIR;
894 			xdp->tn_vnode->v_flag |= V_XATTRDIR;
895 			tdirinit(tp, xdp);
896 			tp->tn_xattrdp = xdp;
897 		} else {
898 			VN_HOLD(tp->tn_xattrdp->tn_vnode);
899 		}
900 		*vpp = TNTOV(tp->tn_xattrdp);
901 		rw_exit(&tp->tn_rwlock);
902 		return (0);
903 	}
904 
905 	/*
906 	 * Null component name is a synonym for directory being searched.
907 	 */
908 	if (*nm == '\0') {
909 		VN_HOLD(dvp);
910 		*vpp = dvp;
911 		return (0);
912 	}
913 	ASSERT(tp);
914 
915 	error = tdirlookup(tp, nm, &ntp, cred);
916 
917 	if (error == 0) {
918 		ASSERT(ntp);
919 		*vpp = TNTOV(ntp);
920 		/*
921 		 * If vnode is a device return special vnode instead
922 		 */
923 		if (IS_DEVVP(*vpp)) {
924 			struct vnode *newvp;
925 
926 			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
927 			    cred);
928 			VN_RELE(*vpp);
929 			*vpp = newvp;
930 		}
931 	}
932 	TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
933 	    "tmpfs lookup:vp %p name %s vpp %p error %d",
934 	    dvp, nm, vpp, error);
935 	return (error);
936 }
937 
938 /*ARGSUSED7*/
939 static int
940 tmp_create(
941 	struct vnode *dvp,
942 	char *nm,
943 	struct vattr *vap,
944 	enum vcexcl exclusive,
945 	int mode,
946 	struct vnode **vpp,
947 	struct cred *cred,
948 	int flag,
949 	caller_context_t *ct,
950 	vsecattr_t *vsecp)
951 {
952 	struct tmpnode *parent;
953 	struct tmount *tm;
954 	struct tmpnode *self;
955 	int error;
956 	struct tmpnode *oldtp;
957 
958 again:
959 	parent = (struct tmpnode *)VTOTN(dvp);
960 	tm = (struct tmount *)VTOTM(dvp);
961 	self = NULL;
962 	error = 0;
963 	oldtp = NULL;
964 
965 	/* device files not allowed in ext. attr dirs */
966 	if ((parent->tn_flags & ISXATTR) &&
967 	    (vap->va_type == VBLK || vap->va_type == VCHR ||
968 	    vap->va_type == VFIFO || vap->va_type == VDOOR ||
969 	    vap->va_type == VSOCK || vap->va_type == VPORT))
970 		return (EINVAL);
971 
972 	if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
973 		/* Must be privileged to set sticky bit */
974 		if (secpolicy_vnode_stky_modify(cred))
975 			vap->va_mode &= ~VSVTX;
976 	} else if (vap->va_type == VNON) {
977 		return (EINVAL);
978 	}
979 
980 	/*
981 	 * Null component name is a synonym for directory being searched.
982 	 */
983 	if (*nm == '\0') {
984 		VN_HOLD(dvp);
985 		oldtp = parent;
986 	} else {
987 		error = tdirlookup(parent, nm, &oldtp, cred);
988 	}
989 
990 	if (error == 0) {	/* name found */
991 		boolean_t trunc = B_FALSE;
992 
993 		ASSERT(oldtp);
994 
995 		rw_enter(&oldtp->tn_rwlock, RW_WRITER);
996 
997 		/*
998 		 * Creating an existing directory is allowed only when no
999 		 * write access is requested (a read-only create).
1000 		 */
1001 		if (exclusive == EXCL)
1002 			error = EEXIST;
1003 		else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
1004 			error = EISDIR;
1005 		else {
1006 			error = tmp_taccess(oldtp, mode, cred);
1007 		}
1008 
1009 		if (error) {
1010 			rw_exit(&oldtp->tn_rwlock);
1011 			tmpnode_rele(oldtp);
1012 			return (error);
1013 		}
1014 		*vpp = TNTOV(oldtp);
1015 		if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
1016 		    vap->va_size == 0) {
1017 			rw_enter(&oldtp->tn_contents, RW_WRITER);
1018 			(void) tmpnode_trunc(tm, oldtp, 0);
1019 			rw_exit(&oldtp->tn_contents);
1020 			trunc = B_TRUE;
1021 		}
1022 		rw_exit(&oldtp->tn_rwlock);
1023 		if (IS_DEVVP(*vpp)) {
1024 			struct vnode *newvp;
1025 
1026 			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
1027 			    cred);
1028 			VN_RELE(*vpp);
1029 			if (newvp == NULL) {
1030 				return (ENOSYS);
1031 			}
1032 			*vpp = newvp;
1033 		}
1034 
1035 		if (trunc)
1036 			vnevent_create(*vpp, ct);
1037 
1038 		return (0);
1039 	}
1040 
1041 	if (error != ENOENT)
1042 		return (error);
1043 
1044 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1045 	error = tdirenter(tm, parent, nm, DE_CREATE,
1046 	    (struct tmpnode *)NULL, (struct tmpnode *)NULL,
1047 	    vap, &self, cred, ct);
1048 	rw_exit(&parent->tn_rwlock);
1049 
1050 	if (error) {
1051 		if (self)
1052 			tmpnode_rele(self);
1053 
1054 		if (error == EEXIST) {
1055 			/*
1056 			 * This means that the file was created sometime
1057 			 * between our check, which did not find it, and our
1058 			 * attempt to create it.
1059 			 * Since creat() is supposed to truncate a file
1060 			 * that already exists, go back to the beginning
1061 			 * of the function. This time we will find it
1062 			 * and go down the tmpnode_trunc() path.
1063 			 */
1064 			goto again;
1065 		}
1066 		return (error);
1067 	}
1068 
1069 	*vpp = TNTOV(self);
1070 
1071 	if (!error && IS_DEVVP(*vpp)) {
1072 		struct vnode *newvp;
1073 
1074 		newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
1075 		VN_RELE(*vpp);
1076 		if (newvp == NULL)
1077 			return (ENOSYS);
1078 		*vpp = newvp;
1079 	}
1080 	TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
1081 	    "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
1082 	return (0);
1083 }
1084 
1085 /* ARGSUSED3 */
1086 static int
1087 tmp_remove(
1088 	struct vnode *dvp,
1089 	char *nm,
1090 	struct cred *cred,
1091 	caller_context_t *ct,
1092 	int flags)
1093 {
1094 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1095 	int error;
1096 	struct tmpnode *tp = NULL;
1097 
1098 	error = tdirlookup(parent, nm, &tp, cred);
1099 	if (error)
1100 		return (error);
1101 
1102 	ASSERT(tp);
1103 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1104 	rw_enter(&tp->tn_rwlock, RW_WRITER);
1105 
1106 	error = (tp->tn_type == VDIR) ? EPERM :
1107 	    tdirdelete(parent, tp, nm, DR_REMOVE, cred);
1108 
1109 	rw_exit(&tp->tn_rwlock);
1110 	rw_exit(&parent->tn_rwlock);
1111 	vnevent_remove(TNTOV(tp), dvp, nm, ct);
1112 	tmpnode_rele(tp);
1113 
1114 	TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
1115 	    "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
1116 	return (error);
1117 }
1118 
1119 /* ARGSUSED4 */
1120 static int
1121 tmp_link(
1122 	struct vnode *dvp,
1123 	struct vnode *srcvp,
1124 	char *tnm,
1125 	struct cred *cred,
1126 	caller_context_t *ct,
1127 	int flags)
1128 {
1129 	struct tmpnode *parent;
1130 	struct tmpnode *from;
1131 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1132 	int error;
1133 	struct tmpnode *found = NULL;
1134 	struct vnode *realvp;
1135 
1136 	if (VOP_REALVP(srcvp, &realvp, ct) == 0)
1137 		srcvp = realvp;
1138 
1139 	parent = (struct tmpnode *)VTOTN(dvp);
1140 	from = (struct tmpnode *)VTOTN(srcvp);
1141 
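	/*
	 * Hard links to directories are not allowed, and linking a file
	 * owned by another user requires the basic link privilege.
	 */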
1142 	if (srcvp->v_type == VDIR ||
1143 	    (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
1144 		return (EPERM);
1145 
1146 	/*
1147 	 * Make sure a link involving extended attributes is valid.
1148 	 * We only support hard linking of xattrs in xattrdir to an xattrdir.
1149 	 */
1150 	if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
1151 		return (EINVAL);
1152 
1153 	error = tdirlookup(parent, tnm, &found, cred);
1154 	if (error == 0) {
1155 		ASSERT(found);
1156 		tmpnode_rele(found);
1157 		return (EEXIST);
1158 	}
1159 
1160 	if (error != ENOENT)
1161 		return (error);
1162 
1163 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1164 	error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
1165 	    from, NULL, (struct tmpnode **)NULL, cred, ct);
1166 	rw_exit(&parent->tn_rwlock);
1167 	if (error == 0) {
1168 		vnevent_link(srcvp, ct);
1169 	}
1170 	return (error);
1171 }
1172 
1173 /* ARGSUSED5 */
1174 static int
1175 tmp_rename(
1176 	struct vnode *odvp,	/* source parent vnode */
1177 	char *onm,		/* source name */
1178 	struct vnode *ndvp,	/* destination parent vnode */
1179 	char *nnm,		/* destination name */
1180 	struct cred *cred,
1181 	caller_context_t *ct,
1182 	int flags)
1183 {
1184 	struct tmpnode *fromparent;
1185 	struct tmpnode *toparent;
1186 	struct tmpnode *fromtp = NULL;	/* source tmpnode */
1187 	struct tmpnode *totp;		/* target tmpnode */
1188 	struct tmount *tm = (struct tmount *)VTOTM(odvp);
1189 	int error;
1190 	int samedir = 0;	/* set if odvp == ndvp */
1191 	struct vnode *realvp;
1192 
1193 	if (VOP_REALVP(ndvp, &realvp, ct) == 0)
1194 		ndvp = realvp;
1195 
1196 	fromparent = (struct tmpnode *)VTOTN(odvp);
1197 	toparent = (struct tmpnode *)VTOTN(ndvp);
1198 
1199 	if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
1200 		return (EINVAL);
1201 
1202 	mutex_enter(&tm->tm_renamelck);
1203 
1204 	/*
1205 	 * Look up tmpnode of file we're supposed to rename.
1206 	 */
1207 	error = tdirlookup(fromparent, onm, &fromtp, cred);
1208 	if (error) {
1209 		mutex_exit(&tm->tm_renamelck);
1210 		return (error);
1211 	}
1212 
1213 	/*
1214 	 * Make sure we can delete the old (source) entry.  This
1215 	 * requires write permission on the containing directory.  If
1216 	 * that directory is "sticky" it requires further checks.
1217 	 */
1218 	if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
1219 	    (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
1220 		goto done;
1221 
1222 	/*
1223 	 * Check for renaming to or from '.' or '..' or that
1224 	 * fromtp == fromparent
1225 	 */
1226 	if ((onm[0] == '.' &&
1227 	    (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
1228 	    (nnm[0] == '.' &&
1229 	    (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
1230 	    (fromparent == fromtp)) {
1231 		error = EINVAL;
1232 		goto done;
1233 	}
1234 
1235 	samedir = (fromparent == toparent);
1236 	/*
1237 	 * Make sure we can search and rename into the new
1238 	 * (destination) directory.
1239 	 */
1240 	if (!samedir) {
1241 		error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
1242 		if (error)
1243 			goto done;
1244 	}
1245 
1246 	if (tdirlookup(toparent, nnm, &totp, cred) == 0) {
1247 		vnevent_pre_rename_dest(TNTOV(totp), ndvp, nnm, ct);
1248 		tmpnode_rele(totp);
1249 	}
1250 
1251 	/* Notify the target dir. if not the same as the source dir. */
1252 	if (ndvp != odvp) {
1253 		vnevent_pre_rename_dest_dir(ndvp, TNTOV(fromtp), nnm, ct);
1254 	}
1255 
1256 	vnevent_pre_rename_src(TNTOV(fromtp), odvp, onm, ct);
1257 
1258 	/*
1259 	 * Link source to new target
1260 	 */
1261 	rw_enter(&toparent->tn_rwlock, RW_WRITER);
1262 	error = tdirenter(tm, toparent, nnm, DE_RENAME,
1263 	    fromparent, fromtp, (struct vattr *)NULL,
1264 	    (struct tmpnode **)NULL, cred, ct);
1265 	rw_exit(&toparent->tn_rwlock);
1266 
1267 	if (error) {
1268 		/*
1269 		 * ESAME isn't really an error; it indicates that the
1270 		 * operation should not be done because the source and target
1271 		 * are the same file, but that no error should be reported.
1272 		 */
1273 		if (error == ESAME)
1274 			error = 0;
1275 		goto done;
1276 	}
1277 
1278 	/*
1279 	 * Unlink from source.
1280 	 */
1281 	rw_enter(&fromparent->tn_rwlock, RW_WRITER);
1282 	rw_enter(&fromtp->tn_rwlock, RW_WRITER);
1283 
1284 	error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);
1285 
1286 	/*
1287 	 * The following handles the case where our source tmpnode was
1288 	 * removed before we got to it.
1289 	 *
1290 	 * XXX We should also clean up properly in the case where tdirdelete
1291 	 * fails for some other reason.  Currently this case shouldn't happen.
1292 	 * (see 1184991).
1293 	 */
1294 	if (error == ENOENT)
1295 		error = 0;
1296 
1297 	rw_exit(&fromtp->tn_rwlock);
1298 	rw_exit(&fromparent->tn_rwlock);
1299 
1300 	if (error == 0) {
1301 		vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);
1302 		/*
1303 		 * vnevent_rename_dest is called in tdirenter().
1304 		 * Notify the target dir if not same as source dir.
1305 		 */
1306 		if (ndvp != odvp)
1307 			vnevent_rename_dest_dir(ndvp, ct);
1308 	}
1309 
1310 done:
1311 	tmpnode_rele(fromtp);
1312 	mutex_exit(&tm->tm_renamelck);
1313 
1314 	TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
1315 	    "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
1316 	    ndvp, nnm, error);
1317 	return (error);
1318 }
1319 
1320 /* ARGSUSED5 */
1321 static int
1322 tmp_mkdir(
1323 	struct vnode *dvp,
1324 	char *nm,
1325 	struct vattr *va,
1326 	struct vnode **vpp,
1327 	struct cred *cred,
1328 	caller_context_t *ct,
1329 	int flags,
1330 	vsecattr_t *vsecp)
1331 {
1332 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1333 	struct tmpnode *self = NULL;
1334 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1335 	int error;
1336 
1337 	/* no new dirs allowed in xattr dirs */
1338 	if (parent->tn_flags & ISXATTR)
1339 		return (EINVAL);
1340 
1341 	/*
1342 	 * Might be dangling directory.  Catch it here,
1343 	 * because an ENOENT return from tdirlookup() is
1344 	 * an "o.k. return".
1345 	 */
1346 	if (parent->tn_nlink == 0)
1347 		return (ENOENT);
1348 
1349 	error = tdirlookup(parent, nm, &self, cred);
1350 	if (error == 0) {
1351 		ASSERT(self);
1352 		tmpnode_rele(self);
1353 		return (EEXIST);
1354 	}
1355 	if (error != ENOENT)
1356 		return (error);
1357 
1358 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1359 	error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
1360 	    (struct tmpnode *)NULL, va, &self, cred, ct);
1361 	if (error) {
1362 		rw_exit(&parent->tn_rwlock);
1363 		if (self)
1364 			tmpnode_rele(self);
1365 		return (error);
1366 	}
1367 	rw_exit(&parent->tn_rwlock);
1368 	*vpp = TNTOV(self);
1369 	return (0);
1370 }
1371 
1372 /* ARGSUSED4 */
1373 static int
1374 tmp_rmdir(
1375 	struct vnode *dvp,
1376 	char *nm,
1377 	struct vnode *cdir,
1378 	struct cred *cred,
1379 	caller_context_t *ct,
1380 	int flags)
1381 {
1382 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1383 	struct tmpnode *self = NULL;
1384 	struct vnode *vp;
1385 	int error = 0;
1386 
1387 	/*
1388 	 * Return error when removing . and ..
1389 	 */
1390 	if (strcmp(nm, ".") == 0)
1391 		return (EINVAL);
1392 	if (strcmp(nm, "..") == 0)
1393 		return (EEXIST); /* Should be ENOTEMPTY */
1394 	error = tdirlookup(parent, nm, &self, cred);
1395 	if (error)
1396 		return (error);
1397 
1398 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1399 	rw_enter(&self->tn_rwlock, RW_WRITER);
1400 
1401 	vp = TNTOV(self);
1402 	if (vp == dvp || vp == cdir) {
1403 		error = EINVAL;
1404 		goto done1;
1405 	}
1406 	if (self->tn_type != VDIR) {
1407 		error = ENOTDIR;
1408 		goto done1;
1409 	}
1410 
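	/*
	 * A link count above 2 means the directory still has
	 * subdirectories (each child's ".." counts as a link), so it
	 * cannot be empty.
	 */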
1411 	mutex_enter(&self->tn_tlock);
1412 	if (self->tn_nlink > 2) {
1413 		mutex_exit(&self->tn_tlock);
1414 		error = EEXIST;
1415 		goto done1;
1416 	}
1417 	mutex_exit(&self->tn_tlock);
1418 
1419 	if (vn_vfswlock(vp)) {
1420 		error = EBUSY;
1421 		goto done1;
1422 	}
1423 	if (vn_mountedvfs(vp) != NULL) {
1424 		error = EBUSY;
1425 		goto done;
1426 	}
1427 
1428 	/*
1429 	 * Check for an empty directory
1430 	 * i.e. only includes entries for "." and ".."
1431 	 */
1432 	if (self->tn_dirents > 2) {
1433 		error = EEXIST;		/* SIGH should be ENOTEMPTY */
1434 		/*
1435 		 * Update atime because checking tn_dirents is logically
1436 		 * equivalent to reading the directory
1437 		 */
1438 		gethrestime(&self->tn_atime);
1439 		goto done;
1440 	}
1441 
1442 	error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
1443 done:
1444 	vn_vfsunlock(vp);
1445 done1:
1446 	rw_exit(&self->tn_rwlock);
1447 	rw_exit(&parent->tn_rwlock);
1448 	vnevent_rmdir(TNTOV(self), dvp, nm, ct);
1449 	tmpnode_rele(self);
1450 
1451 	return (error);
1452 }
1453 
1454 /* ARGSUSED2 */
1455 static int
1456 tmp_readdir(
1457 	struct vnode *vp,
1458 	struct uio *uiop,
1459 	struct cred *cred,
1460 	int *eofp,
1461 	caller_context_t *ct,
1462 	int flags)
1463 {
1464 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1465 	struct tdirent *tdp;
1466 	int error = 0;
1467 	size_t namelen;
1468 	struct dirent64 *dp;
1469 	ulong_t offset;
1470 	ulong_t total_bytes_wanted;
1471 	long outcount = 0;
1472 	long bufsize;
1473 	int reclen;
1474 	caddr_t outbuf;
1475 
1476 	if (uiop->uio_loffset >= MAXOFF_T) {
1477 		if (eofp)
1478 			*eofp = 1;
1479 		return (0);
1480 	}
1481 	/*
1482 	 * assuming the system call has already called tmp_rwlock
1483 	 */
1484 	ASSERT(RW_READ_HELD(&tp->tn_rwlock));
1485 
1486 	if (uiop->uio_iovcnt != 1)
1487 		return (EINVAL);
1488 
1489 	if (vp->v_type != VDIR)
1490 		return (ENOTDIR);
1491 
1492 	/*
1493 	 * There's a window here where someone could have removed
1494 	 * all the entries in the directory after we put a hold on the
1495 	 * vnode but before we grabbed the rwlock.  Just return.
1496 	 */
1497 	if (tp->tn_dir == NULL) {
1498 		if (tp->tn_nlink) {
1499 			panic("empty directory 0x%p", (void *)tp);
1500 			/*NOTREACHED*/
1501 		}
1502 		return (0);
1503 	}
1504 
1505 	/*
1506 	 * Get space for multiple directory entries
1507 	 */
1508 	total_bytes_wanted = uiop->uio_iov->iov_len;
1509 	bufsize = total_bytes_wanted + sizeof (struct dirent64);
1510 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
1511 
1512 	dp = (struct dirent64 *)outbuf;
1513 
1514 
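	/*
	 * Walk the in-core directory list, copying out each entry at or
	 * beyond the requested offset that fits in the caller's buffer.
	 */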
1515 	offset = 0;
1516 	tdp = tp->tn_dir;
1517 	while (tdp) {
1518 		namelen = strlen(tdp->td_name);	/* no +1 needed */
1519 		offset = tdp->td_offset;
1520 		if (offset >= uiop->uio_offset) {
1521 			reclen = (int)DIRENT64_RECLEN(namelen);
1522 			if (outcount + reclen > total_bytes_wanted) {
1523 				if (!outcount)
1524 					/*
1525 					 * Buffer too small for any entries.
1526 					 */
1527 					error = EINVAL;
1528 				break;
1529 			}
1530 			ASSERT(tdp->td_tmpnode != NULL);
1531 
1532 			/* use strncpy(9f) to zero out uninitialized bytes */
1533 
1534 			(void) strncpy(dp->d_name, tdp->td_name,
1535 			    DIRENT64_NAMELEN(reclen));
1536 			dp->d_reclen = (ushort_t)reclen;
1537 			dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
1538 			dp->d_off = (offset_t)tdp->td_offset + 1;
1539 			dp = (struct dirent64 *)
1540 			    ((uintptr_t)dp + dp->d_reclen);
1541 			outcount += reclen;
1542 			ASSERT(outcount <= bufsize);
1543 		}
1544 		tdp = tdp->td_next;
1545 	}
1546 
1547 	if (!error)
1548 		error = uiomove(outbuf, outcount, UIO_READ, uiop);
1549 
1550 	if (!error) {
1551 		/* If we reached the end of the list our offset */
1552 		/* should now be just past the end. */
1553 		if (!tdp) {
1554 			offset += 1;
1555 			if (eofp)
1556 				*eofp = 1;
1557 		} else if (eofp)
1558 			*eofp = 0;
1559 		uiop->uio_offset = offset;
1560 	}
1561 	gethrestime(&tp->tn_atime);
1562 	kmem_free(outbuf, bufsize);
1563 	return (error);
1564 }
1565 
1566 /* ARGSUSED5 */
1567 static int
1568 tmp_symlink(
1569 	struct vnode *dvp,
1570 	char *lnm,
1571 	struct vattr *tva,
1572 	char *tnm,
1573 	struct cred *cred,
1574 	caller_context_t *ct,
1575 	int flags)
1576 {
1577 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1578 	struct tmpnode *self = (struct tmpnode *)NULL;
1579 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1580 	char *cp = NULL;
1581 	int error;
1582 	size_t len;
1583 
1584 	/* no symlinks allowed to files in xattr dirs */
1585 	if (parent->tn_flags & ISXATTR)
1586 		return (EINVAL);
1587 
1588 	error = tdirlookup(parent, lnm, &self, cred);
1589 	if (error == 0) {
1590 		/*
1591 		 * The entry already exists
1592 		 */
1593 		tmpnode_rele(self);
1594 		return (EEXIST);	/* was 0 */
1595 	}
1596 
1597 	if (error != ENOENT) {
1598 		if (self != NULL)
1599 			tmpnode_rele(self);
1600 		return (error);
1601 	}
1602 
1603 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1604 	error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1605 	    (struct tmpnode *)NULL, tva, &self, cred, ct);
1606 	rw_exit(&parent->tn_rwlock);
1607 
1608 	if (error) {
1609 		if (self)
1610 			tmpnode_rele(self);
1611 		return (error);
1612 	}
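	/*
	 * Store a private copy of the link target in the new tmpnode;
	 * tn_size is the target length, not counting the terminating NUL.
	 */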
1613 	len = strlen(tnm) + 1;
1614 	cp = tmp_memalloc(len, 0);
1615 	if (cp == NULL) {
1616 		tmpnode_rele(self);
1617 		return (ENOSPC);
1618 	}
1619 	(void) strcpy(cp, tnm);
1620 
1621 	self->tn_symlink = cp;
1622 	self->tn_size = len - 1;
1623 	tmpnode_rele(self);
1624 	return (error);
1625 }
1626 
1627 /* ARGSUSED2 */
1628 static int
1629 tmp_readlink(
1630 	struct vnode *vp,
1631 	struct uio *uiop,
1632 	struct cred *cred,
1633 	caller_context_t *ct)
1634 {
1635 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1636 	int error = 0;
1637 
1638 	if (vp->v_type != VLNK)
1639 		return (EINVAL);
1640 
1641 	rw_enter(&tp->tn_rwlock, RW_READER);
1642 	rw_enter(&tp->tn_contents, RW_READER);
1643 	error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
1644 	gethrestime(&tp->tn_atime);
1645 	rw_exit(&tp->tn_contents);
1646 	rw_exit(&tp->tn_rwlock);
1647 	return (error);
1648 }
1649 
1650 /* ARGSUSED */
1651 static int
1652 tmp_fsync(
1653 	struct vnode *vp,
1654 	int syncflag,
1655 	struct cred *cred,
1656 	caller_context_t *ct)
1657 {
1658 	return (0);
1659 }
1660 
1661 /* ARGSUSED */
1662 static void
1663 tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
1664 {
1665 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1666 	struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
1667 
1668 	rw_enter(&tp->tn_rwlock, RW_WRITER);
1669 top:
1670 	mutex_enter(&tp->tn_tlock);
1671 	mutex_enter(&vp->v_lock);
1672 	ASSERT(vp->v_count >= 1);
1673 
1674 	/*
1675 	 * If we don't have the last hold or the link count is non-zero,
1676 	 * there's little to do -- just drop our hold.
1677 	 */
1678 	if (vp->v_count > 1 || tp->tn_nlink != 0) {
1679 		vp->v_count--;
1680 		mutex_exit(&vp->v_lock);
1681 		mutex_exit(&tp->tn_tlock);
1682 		rw_exit(&tp->tn_rwlock);
1683 		return;
1684 	}
1685 
1686 	/*
1687 	 * We have the last hold *and* the link count is zero, so this
1688 	 * tmpnode is dead from the filesystem's viewpoint.  However,
1689 	 * if the tmpnode has any pages associated with it (i.e. if it's
1690 	 * a normal file with non-zero size), the tmpnode can still be
1691 	 * discovered by pageout or fsflush via the page vnode pointers.
1692 	 * In this case we must drop all our locks, truncate the tmpnode,
1693 	 * and try the whole dance again.
1694 	 */
1695 	if (tp->tn_size != 0) {
1696 		if (tp->tn_type == VREG) {
1697 			mutex_exit(&vp->v_lock);
1698 			mutex_exit(&tp->tn_tlock);
1699 			rw_enter(&tp->tn_contents, RW_WRITER);
1700 			(void) tmpnode_trunc(tm, tp, 0);
1701 			rw_exit(&tp->tn_contents);
1702 			ASSERT(tp->tn_size == 0);
1703 			ASSERT(tp->tn_nblocks == 0);
1704 			goto top;
1705 		}
1706 		if (tp->tn_type == VLNK)
1707 			tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
1708 	}
1709 
1710 	/*
1711 	 * Remove normal file/dir's xattr dir and xattrs.
1712 	 */
1713 	if (tp->tn_xattrdp) {
1714 		struct tmpnode *xtp = tp->tn_xattrdp;
1715 
1716 		ASSERT(xtp->tn_flags & ISXATTR);
1717 		tmpnode_hold(xtp);
1718 		rw_enter(&xtp->tn_rwlock, RW_WRITER);
1719 		tdirtrunc(xtp);
1720 		DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
1721 		tp->tn_xattrdp = NULL;
1722 		rw_exit(&xtp->tn_rwlock);
1723 		tmpnode_rele(xtp);
1724 	}
1725 
1726 	mutex_exit(&vp->v_lock);
1727 	mutex_exit(&tp->tn_tlock);
1728 	/* Here's our chance to send invalid event while we're between locks */
1729 	vn_invalid(TNTOV(tp));
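	/*
	 * Unlink the tmpnode from the per-mount list of tmpnodes before
	 * freeing it.
	 */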
1730 	mutex_enter(&tm->tm_contents);
1731 	if (tp->tn_forw == NULL)
1732 		tm->tm_rootnode->tn_back = tp->tn_back;
1733 	else
1734 		tp->tn_forw->tn_back = tp->tn_back;
1735 	tp->tn_back->tn_forw = tp->tn_forw;
1736 	mutex_exit(&tm->tm_contents);
1737 	rw_exit(&tp->tn_rwlock);
1738 	rw_destroy(&tp->tn_rwlock);
1739 	mutex_destroy(&tp->tn_tlock);
1740 	vn_free(TNTOV(tp));
1741 	tmp_memfree(tp, sizeof (struct tmpnode));
1742 }
1743 
1744 /* ARGSUSED2 */
1745 static int
1746 tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1747 {
1748 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1749 	struct tfid *tfid;
1750 
1751 	if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
1752 		fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
1753 		return (ENOSPC);
1754 	}
1755 
1756 	tfid = (struct tfid *)fidp;
1757 	bzero(tfid, sizeof (struct tfid));
1758 	tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
1759 
1760 	tfid->tfid_ino = tp->tn_nodeid;
1761 	tfid->tfid_gen = tp->tn_gen;
1762 
1763 	return (0);
1764 }
1765 
1766 
1767 /*
1768  * Return all the pages from [off..off+len] in given file
1769  */
1770 /* ARGSUSED */
1771 static int
1772 tmp_getpage(
1773 	struct vnode *vp,
1774 	offset_t off,
1775 	size_t len,
1776 	uint_t *protp,
1777 	page_t *pl[],
1778 	size_t plsz,
1779 	struct seg *seg,
1780 	caddr_t addr,
1781 	enum seg_rw rw,
1782 	struct cred *cr,
1783 	caller_context_t *ct)
1784 {
1785 	int err = 0;
1786 	struct tmpnode *tp = VTOTN(vp);
1787 	anoff_t toff = (anoff_t)off;
1788 	size_t tlen = len;
1789 	u_offset_t tmpoff;
1790 	timestruc_t now;
1791 
1792 	rw_enter(&tp->tn_contents, RW_READER);
1793 
1794 	if (off + len  > tp->tn_size + PAGEOFFSET) {
1795 		err = EFAULT;
1796 		goto out;
1797 	}
1798 	/*
1799 	 * Look for holes (no anon slot) in faulting range. If there are
1800 	 * holes we have to switch to a write lock and fill them in. Swap
1801 	 * space for holes was already reserved when the file was grown.
1802 	 */
1803 	tmpoff = toff;
1804 	if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
1805 		if (!rw_tryupgrade(&tp->tn_contents)) {
1806 			rw_exit(&tp->tn_contents);
1807 			rw_enter(&tp->tn_contents, RW_WRITER);
1808 			/* Size may have changed when lock was dropped */
1809 			if (off + len  > tp->tn_size + PAGEOFFSET) {
1810 				err = EFAULT;
1811 				goto out;
1812 			}
1813 		}
1814 		for (toff = (anoff_t)off; toff < (anoff_t)off + len;
1815 		    toff += PAGESIZE) {
1816 			if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
1817 				/* XXX - may allocate mem w. write lock held */
1818 				(void) anon_set_ptr(tp->tn_anon, btop(toff),
1819 				    anon_alloc(vp, toff), ANON_SLEEP);
1820 				tp->tn_nblocks++;
1821 			}
1822 		}
1823 		rw_downgrade(&tp->tn_contents);
1824 	}
1825 
1826 
1827 	err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1828 	    pl, plsz, seg, addr, rw, cr);
1829 
1830 	gethrestime(&now);
1831 	tp->tn_atime = now;
1832 	if (rw == S_WRITE)
1833 		tp->tn_mtime = now;
1834 
1835 out:
1836 	rw_exit(&tp->tn_contents);
1837 	return (err);
1838 }
1839 
1840 /*
1841  * Called from pvn_getpages to get a particular page.
1842  */
1843 /*ARGSUSED*/
1844 static int
1845 tmp_getapage(
1846 	struct vnode *vp,
1847 	u_offset_t off,
1848 	size_t len,
1849 	uint_t *protp,
1850 	page_t *pl[],
1851 	size_t plsz,
1852 	struct seg *seg,
1853 	caddr_t addr,
1854 	enum seg_rw rw,
1855 	struct cred *cr)
1856 {
1857 	struct page *pp;
1858 	int flags;
1859 	int err = 0;
1860 	struct vnode *pvp;
1861 	u_offset_t poff;
1862 
1863 	if (protp != NULL)
1864 		*protp = PROT_ALL;
1865 again:
1866 	if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
1867 		if (pl) {
1868 			pl[0] = pp;
1869 			pl[1] = NULL;
1870 		} else {
1871 			page_unlock(pp);
1872 		}
1873 	} else {
1874 		pp = page_create_va(vp, off, PAGESIZE,
1875 		    PG_WAIT | PG_EXCL, seg, addr);
1876 		/*
1877 		 * Someone raced in and created the page after we did the
1878 		 * lookup but before we did the create, so go back and
1879 		 * try to look it up again.
1880 		 */
1881 		if (pp == NULL)
1882 			goto again;
1883 		/*
1884 		 * Fill page from backing store, if any. If none, then
1885 		 * either this is a newly filled hole or page must have
1886 		 * been unmodified and freed so just zero it out.
1887 		 */
1888 		err = swap_getphysname(vp, off, &pvp, &poff);
1889 		if (err) {
1890 			panic("tmp_getapage: no anon slot vp %p "
1891 			    "off %llx pp %p\n", (void *)vp, off, (void *)pp);
1892 		}
1893 		if (pvp) {
1894 			flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
1895 			err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1896 			    flags, cr, NULL);
1897 			if (flags & B_ASYNC)
1898 				pp = NULL;
1899 		} else if (rw != S_CREATE) {
1900 			pagezero(pp, 0, PAGESIZE);
1901 		}
1902 		if (err && pp)
1903 			pvn_read_done(pp, B_ERROR);
1904 		if (err == 0) {
1905 			if (pl)
1906 				pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
1907 			else
1908 				pvn_io_done(pp);
1909 		}
1910 	}
1911 	return (err);
1912 }
1913 
1914 
1915 /*
1916  * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
1917  * If len == 0, do from off to EOF.
1918  */
1919 static int tmp_nopage = 0;	/* Don't do tmp_putpage's if set */
1920 
1921 /* ARGSUSED */
1922 int
1923 tmp_putpage(
1924 	register struct vnode *vp,
1925 	offset_t off,
1926 	size_t len,
1927 	int flags,
1928 	struct cred *cr,
1929 	caller_context_t *ct)
1930 {
1931 	register page_t *pp;
1932 	u_offset_t io_off;
1933 	size_t io_len = 0;
1934 	int err = 0;
1935 	struct tmpnode *tp = VTOTN(vp);
1936 	int dolock;
1937 
1938 	if (tmp_nopage)
1939 		return (0);
1940 
1941 	ASSERT(vp->v_count != 0);
1942 
1943 	if (vp->v_flag & VNOMAP)
1944 		return (ENOSYS);
1945 
1946 	/*
1947 	 * This being tmpfs, we don't ever do i/o unless we really
1948 	 * have to (when we're low on memory and pageout calls us
1949 	 * with B_ASYNC | B_FREE or the user explicitly asks for it with
1950 	 * B_DONTNEED).
1951 	 * XXX to approximately track the mod time like ufs we should
1952 	 * update the times here. The problem is, once someone does a
1953 	 * store we never clear the mod bit and do i/o, thus fsflush
1954 	 * will keep calling us every 30 seconds to do the i/o and we'll
1955 	 * continually update the mod time. At least we update the mod
1956 	 * time on the first store because this results in a call to getpage.
1957 	 */
1958 	if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
1959 	    (flags & B_DONTNEED) == 0)
1960 		return (0);
1961 	/*
1962 	 * If this thread owns the lock, i.e., this thread grabbed it
1963 	 * as writer somewhere above, then we don't need to grab the
1964 	 * lock as reader in this routine.
1965 	 */
1966 	dolock = (rw_owner(&tp->tn_contents) != curthread);
1967 
1968 	/*
1969 	 * If this is pageout don't block on the lock as you could deadlock
1970 	 * when freemem == 0 (another thread has the read lock and is blocked
1971 	 * creating a page, and a third thread is waiting to get the writers
1972 	 * lock - waiting writers priority blocks us from getting the read
1973 	 * lock). Of course, if the only freeable pages are on this tmpnode
1974 	 * we're hosed anyway. A better solution might be a new lock type.
1975 	 * Note: ufs has the same problem.
1976 	 */
1977 	if (curproc == proc_pageout) {
1978 		if (!rw_tryenter(&tp->tn_contents, RW_READER))
1979 			return (ENOMEM);
1980 	} else if (dolock)
1981 		rw_enter(&tp->tn_contents, RW_READER);
1982 
1983 	if (!vn_has_cached_data(vp))
1984 		goto out;
1985 
1986 	if (len == 0) {
1987 		if (curproc == proc_pageout) {
1988 			panic("tmp: pageout can't block");
1989 			/*NOTREACHED*/
1990 		}
1991 
1992 		/* Search the entire vp list for pages >= off. */
1993 		err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
1994 		    flags, cr);
1995 	} else {
1996 		u_offset_t eoff;
1997 
1998 		/*
1999 		 * Loop over all offsets in the range [off...off + len]
2000 		 * looking for pages to deal with.
2001 		 */
2002 		eoff = MIN(off + len, tp->tn_size);
2003 		for (io_off = off; io_off < eoff; io_off += io_len) {
2004 			/*
2005 			 * If we are not invalidating, synchronously
2006 			 * freeing, or writing pages, use the routine
2007 			 * page_lookup_nowait() to prevent reclaiming
2008 			 * them from the free list.
2009 			 */
2010 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
2011 				pp = page_lookup(vp, io_off,
2012 				    (flags & (B_INVAL | B_FREE)) ?
2013 				    SE_EXCL : SE_SHARED);
2014 			} else {
2015 				pp = page_lookup_nowait(vp, io_off,
2016 				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2017 			}
2018 
2019 			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2020 				io_len = PAGESIZE;
2021 			else {
2022 				err = tmp_putapage(vp, pp, &io_off, &io_len,
2023 				    flags, cr);
2024 				if (err != 0)
2025 					break;
2026 			}
2027 		}
2028 	}
2029 	/* If invalidating, verify all pages on vnode list are gone. */
2030 	if (err == 0 && off == 0 && len == 0 &&
2031 	    (flags & B_INVAL) && vn_has_cached_data(vp)) {
2032 		panic("tmp_putpage: B_INVAL, pages not gone");
2033 		/*NOTREACHED*/
2034 	}
2035 out:
2036 	if ((curproc == proc_pageout) || dolock)
2037 		rw_exit(&tp->tn_contents);
2038 	/*
2039 	 * The only reason putapage is going to give us SE_NOSWAP as an error
2040 	 * is when we ask a page to be written to physical backing store
2041 	 * and there is none. Ignore this because we might be dealing
2042 	 * with a swap page which does not have any backing store
2043 	 * on disk. In any other case we won't get this error here.
2044 	 */
2045 	if (err == SE_NOSWAP)
2046 		err = 0;
2047 	return (err);
2048 }
2049 
2050 long tmp_putpagecnt, tmp_pagespushed;
2051 
2052 /*
2053  * Write out a single page.
2054  * For tmpfs this means choose a physical swap slot and write the page
2055  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2056  * we try to find a bunch of other dirty pages adjacent in the file
2057  * and a bunch of contiguous swap slots, and then write all the pages
2058  * out in a single i/o.
2059  */
2060 /*ARGSUSED*/
2061 static int
2062 tmp_putapage(
2063 	struct vnode *vp,
2064 	page_t *pp,
2065 	u_offset_t *offp,
2066 	size_t *lenp,
2067 	int flags,
2068 	struct cred *cr)
2069 {
2070 	int err;
2071 	ulong_t klstart, kllen;
2072 	page_t *pplist, *npplist;
2073 	extern int klustsize;
2074 	long tmp_klustsize;
2075 	struct tmpnode *tp;
2076 	size_t pp_off, pp_len;
2077 	u_offset_t io_off;
2078 	size_t io_len;
2079 	struct vnode *pvp;
2080 	u_offset_t pstart;
2081 	u_offset_t offset;
2082 	u_offset_t tmpoff;
2083 
2084 	ASSERT(PAGE_LOCKED(pp));
2085 
2086 	/* Kluster in tmp_klustsize chunks */
2087 	tp = VTOTN(vp);
2088 	tmp_klustsize = klustsize;
2089 	offset = pp->p_offset;
2090 	klstart = (offset / tmp_klustsize) * tmp_klustsize;
2091 	kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
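	/*
	 * Illustrative arithmetic (hypothetical values): with a 64K kluster
	 * size, a dirty page at offset 0x13000 gives klstart = 0x10000, and
	 * for a 0x19000-byte file kllen = MIN(0x10000, 0x9000) = 0x9000, so
	 * the kluster is clamped at end-of-file.
	 */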
2092 
2093 	/* Get a kluster of pages */
2094 	pplist =
2095 	    pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2096 
2097 	pp_off = (size_t)tmpoff;
2098 
2099 	/*
2100 	 * Get a cluster of physical offsets for the pages; the amount we
2101 	 * get may be some subrange of what we ask for (io_off, io_len).
2102 	 */
2103 	io_off = pp_off;
2104 	io_len = pp_len;
2105 	err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2106 	ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2107 	if (err) {
2108 		pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2109 		/*
2110 		 * If this routine is called as a result of a segvn_sync
2111 		 * operation and we have no physical swap, we can get an
2112 		 * error here. In that case we return SE_NOSWAP; at this
2113 		 * point it is the only error we expect.
2114 		 */
2115 		ASSERT(err == SE_NOSWAP);
2116 		if (flags & B_INVAL)
2117 			err = ENOMEM;
2118 		goto out;
2119 	}
2120 	ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2121 	ASSERT(io_off <= offset && offset < io_off + io_len);
2122 
2123 	/* Toss pages at front/rear we couldn't get physical backing for */
2124 	if (io_off != pp_off) {
2125 		npplist = NULL;
2126 		page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2127 		ASSERT(pplist->p_offset == pp_off);
2128 		ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2129 		pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2130 		pplist = npplist;
2131 	}
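	/* Likewise toss any pages past the end of the physical range. */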
2132 	if (io_off + io_len < pp_off + pp_len) {
2133 		npplist = NULL;
2134 		page_list_break(&pplist, &npplist, btop(io_len));
2135 		ASSERT(npplist->p_offset == io_off + io_len);
2136 		ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
2137 		pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
2138 	}
2139 
2140 	ASSERT(pplist->p_offset == io_off);
2141 	ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2142 	ASSERT(btopr(io_len) <= btopr(kllen));
2143 
2144 	/* Do i/o on the remaining kluster */
2145 	err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2146 	    B_WRITE | flags, cr, NULL);
2147 
2148 	if ((flags & B_ASYNC) == 0) {
2149 		pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2150 	}
2151 out:
2152 	if (!err) {
2153 		if (offp)
2154 			*offp = io_off;
2155 		if (lenp)
2156 			*lenp = io_len;
2157 		tmp_putpagecnt++;
2158 		tmp_pagespushed += btop(io_len);
2159 	}
2160 	if (err && err != ENOMEM && err != SE_NOSWAP)
2161 		cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2162 	return (err);
2163 }
2164 
2165 /* ARGSUSED */
2166 static int
2167 tmp_map(
2168 	struct vnode *vp,
2169 	offset_t off,
2170 	struct as *as,
2171 	caddr_t *addrp,
2172 	size_t len,
2173 	uchar_t prot,
2174 	uchar_t maxprot,
2175 	uint_t flags,
2176 	struct cred *cred,
2177 	caller_context_t *ct)
2178 {
2179 	struct segvn_crargs vn_a;
2180 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
2181 	int error;
2182 
2183 #ifdef _ILP32
2184 	if (len > MAXOFF_T)
2185 		return (ENOMEM);
2186 #endif
2187 
2188 	if (vp->v_flag & VNOMAP)
2189 		return (ENOSYS);
2190 
2191 	if (off < 0 || (offset_t)(off + len) < 0 ||
2192 	    off > MAXOFF_T || (off + len) > MAXOFF_T)
2193 		return (ENXIO);
2194 
2195 	if (vp->v_type != VREG)
2196 		return (ENODEV);
2197 
2198 	/*
2199 	 * Don't allow mapping to a locked file
2200 	 */
2201 	if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
2202 		return (EAGAIN);
2203 	}
2204 
2205 	as_rangelock(as);
2206 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2207 	if (error != 0) {
2208 		as_rangeunlock(as);
2209 		return (error);
2210 	}
2211 
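	/*
	 * Set up the segvn creation arguments; the mapping is backed
	 * directly by this vnode (no anon map is supplied up front).
	 */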
2212 	vn_a.vp = vp;
2213 	vn_a.offset = (u_offset_t)off;
2214 	vn_a.type = flags & MAP_TYPE;
2215 	vn_a.prot = prot;
2216 	vn_a.maxprot = maxprot;
2217 	vn_a.flags = flags & ~MAP_TYPE;
2218 	vn_a.cred = cred;
2219 	vn_a.amp = NULL;
2220 	vn_a.szc = 0;
2221 	vn_a.lgrp_mem_policy_flags = 0;
2222 
2223 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
2224 	as_rangeunlock(as);
2225 	return (error);
2226 }
2227 
2228 /*
2229  * tmp_addmap and tmp_delmap can't be called since the vp
2230  * maintained in the segvn mapping is NULL.
2231  */
2232 /* ARGSUSED */
2233 static int
2234 tmp_addmap(
2235 	struct vnode *vp,
2236 	offset_t off,
2237 	struct as *as,
2238 	caddr_t addr,
2239 	size_t len,
2240 	uchar_t prot,
2241 	uchar_t maxprot,
2242 	uint_t flags,
2243 	struct cred *cred,
2244 	caller_context_t *ct)
2245 {
2246 	return (0);
2247 }
2248 
2249 /* ARGSUSED */
2250 static int
2251 tmp_delmap(
2252 	struct vnode *vp,
2253 	offset_t off,
2254 	struct as *as,
2255 	caddr_t addr,
2256 	size_t len,
2257 	uint_t prot,
2258 	uint_t maxprot,
2259 	uint_t flags,
2260 	struct cred *cred,
2261 	caller_context_t *ct)
2262 {
2263 	return (0);
2264 }
2265 
2266 static int
2267 tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
2268 {
2269 	register int i;
2270 	register struct tmpnode *tp = VTOTN(vp);
2271 	int error;
2272 
2273 	ASSERT(vp->v_type == VREG);
2274 	ASSERT(lp->l_start >= 0);
2275 
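	/*
	 * Only "free from l_start to end of file" (l_len == 0) is
	 * supported, so F_FREESP amounts to setting the file size to
	 * l_start via tmpnode_trunc() below.
	 */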
2276 	if (lp->l_len != 0)
2277 		return (EINVAL);
2278 
2279 	rw_enter(&tp->tn_rwlock, RW_WRITER);
2280 	if (tp->tn_size == lp->l_start) {
2281 		rw_exit(&tp->tn_rwlock);
2282 		return (0);
2283 	}
2284 
2285 	/*
2286 	 * Check for any mandatory locks on the range
2287 	 */
2288 	if (MANDLOCK(vp, tp->tn_mode)) {
2289 		long save_start;
2290 
2291 		save_start = lp->l_start;
2292 
2293 		if (tp->tn_size < lp->l_start) {
2294 			/*
2295 			 * "Truncate up" case: need to make sure there
2296 			 * is no lock beyond current end-of-file. To
2297 			 * do so, we need to set l_start to the size
2298 			 * of the file temporarily.
2299 			 */
2300 			lp->l_start = tp->tn_size;
2301 		}
2302 		lp->l_type = F_WRLCK;
2303 		lp->l_sysid = 0;
2304 		lp->l_pid = ttoproc(curthread)->p_pid;
2305 		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
2306 		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
2307 		    lp->l_type != F_UNLCK) {
2308 			rw_exit(&tp->tn_rwlock);
2309 			return (i ? i : EAGAIN);
2310 		}
2311 
2312 		lp->l_start = save_start;
2313 	}
2315 
2316 	rw_enter(&tp->tn_contents, RW_WRITER);
2317 	error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
2318 	    tp, (ulong_t)lp->l_start);
2319 	rw_exit(&tp->tn_contents);
2320 	rw_exit(&tp->tn_rwlock);
2321 	return (error);
2322 }
2323 
2324 /* ARGSUSED */
2325 static int
2326 tmp_space(
2327 	struct vnode *vp,
2328 	int cmd,
2329 	struct flock64 *bfp,
2330 	int flag,
2331 	offset_t offset,
2332 	cred_t *cred,
2333 	caller_context_t *ct)
2334 {
2335 	int error;
2336 
2337 	if (cmd != F_FREESP)
2338 		return (EINVAL);
2339 	if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
2340 		if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
2341 			return (EFBIG);
2342 		error = tmp_freesp(vp, bfp, flag);
2343 
2344 		if (error == 0 && bfp->l_start == 0)
2345 			vnevent_truncate(vp, ct);
2346 	}
2347 	return (error);
2348 }
2349 
2350 /* ARGSUSED */
2351 static int
2352 tmp_seek(
2353 	struct vnode *vp,
2354 	offset_t ooff,
2355 	offset_t *noffp,
2356 	caller_context_t *ct)
2357 {
2358 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
2359 }
2360 
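/*
 * tn_rwlock serializes access to the tmpnode. tmp_rwlock() returns the
 * requested lock type; tmp_rwunlock() simply drops tn_rwlock, whichever
 * mode was taken.
 */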
2361 /* ARGSUSED2 */
2362 static int
2363 tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2364 {
2365 	struct tmpnode *tp = VTOTN(vp);
2366 
2367 	if (write_lock) {
2368 		rw_enter(&tp->tn_rwlock, RW_WRITER);
2369 	} else {
2370 		rw_enter(&tp->tn_rwlock, RW_READER);
2371 	}
2372 	return (write_lock);
2373 }
2374 
2375 /* ARGSUSED1 */
2376 static void
2377 tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2378 {
2379 	struct tmpnode *tp = VTOTN(vp);
2380 
2381 	rw_exit(&tp->tn_rwlock);
2382 }
2383 
2384 static int
2385 tmp_pathconf(
2386 	struct vnode *vp,
2387 	int cmd,
2388 	ulong_t *valp,
2389 	cred_t *cr,
2390 	caller_context_t *ct)
2391 {
2392 	struct tmpnode *tp = NULL;
2393 	int error;
2394 
2395 	switch (cmd) {
2396 	case _PC_XATTR_EXISTS:
2397 		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
2398 			*valp = 0;	/* assume no attributes */
2399 			error = 0;	/* okay to ask */
2400 			tp = VTOTN(vp);
2401 			rw_enter(&tp->tn_rwlock, RW_READER);
2402 			if (tp->tn_xattrdp) {
2403 				rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
2404 				/* do not count "." and ".." */
2405 				if (tp->tn_xattrdp->tn_dirents > 2)
2406 					*valp = 1;
2407 				rw_exit(&tp->tn_xattrdp->tn_rwlock);
2408 			}
2409 			rw_exit(&tp->tn_rwlock);
2410 		} else {
2411 			error = EINVAL;
2412 		}
2413 		break;
2414 	case _PC_SATTR_ENABLED:
2415 	case _PC_SATTR_EXISTS:
2416 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2417 		    (vp->v_type == VREG || vp->v_type == VDIR);
2418 		error = 0;
2419 		break;
2420 	case _PC_TIMESTAMP_RESOLUTION:
2421 		/* nanosecond timestamp resolution */
2422 		*valp = 1L;
2423 		error = 0;
2424 		break;
2425 	default:
2426 		error = fs_pathconf(vp, cmd, valp, cr, ct);
2427 	}
2428 	return (error);
2429 }
2430 
2431 
2432 struct vnodeops *tmp_vnodeops;
2433 
2434 const fs_operation_def_t tmp_vnodeops_template[] = {
2435 	VOPNAME_OPEN,		{ .vop_open = tmp_open },
2436 	VOPNAME_CLOSE,		{ .vop_close = tmp_close },
2437 	VOPNAME_READ,		{ .vop_read = tmp_read },
2438 	VOPNAME_WRITE,		{ .vop_write = tmp_write },
2439 	VOPNAME_IOCTL,		{ .vop_ioctl = tmp_ioctl },
2440 	VOPNAME_GETATTR,	{ .vop_getattr = tmp_getattr },
2441 	VOPNAME_SETATTR,	{ .vop_setattr = tmp_setattr },
2442 	VOPNAME_ACCESS,		{ .vop_access = tmp_access },
2443 	VOPNAME_LOOKUP,		{ .vop_lookup = tmp_lookup },
2444 	VOPNAME_CREATE,		{ .vop_create = tmp_create },
2445 	VOPNAME_REMOVE,		{ .vop_remove = tmp_remove },
2446 	VOPNAME_LINK,		{ .vop_link = tmp_link },
2447 	VOPNAME_RENAME,		{ .vop_rename = tmp_rename },
2448 	VOPNAME_MKDIR,		{ .vop_mkdir = tmp_mkdir },
2449 	VOPNAME_RMDIR,		{ .vop_rmdir = tmp_rmdir },
2450 	VOPNAME_READDIR,	{ .vop_readdir = tmp_readdir },
2451 	VOPNAME_SYMLINK,	{ .vop_symlink = tmp_symlink },
2452 	VOPNAME_READLINK,	{ .vop_readlink = tmp_readlink },
2453 	VOPNAME_FSYNC,		{ .vop_fsync = tmp_fsync },
2454 	VOPNAME_INACTIVE,	{ .vop_inactive = tmp_inactive },
2455 	VOPNAME_FID,		{ .vop_fid = tmp_fid },
2456 	VOPNAME_RWLOCK,		{ .vop_rwlock = tmp_rwlock },
2457 	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = tmp_rwunlock },
2458 	VOPNAME_SEEK,		{ .vop_seek = tmp_seek },
2459 	VOPNAME_SPACE,		{ .vop_space = tmp_space },
2460 	VOPNAME_GETPAGE,	{ .vop_getpage = tmp_getpage },
2461 	VOPNAME_PUTPAGE,	{ .vop_putpage = tmp_putpage },
2462 	VOPNAME_MAP,		{ .vop_map = tmp_map },
2463 	VOPNAME_ADDMAP,		{ .vop_addmap = tmp_addmap },
2464 	VOPNAME_DELMAP,		{ .vop_delmap = tmp_delmap },
2465 	VOPNAME_PATHCONF,	{ .vop_pathconf = tmp_pathconf },
2466 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
2467 	NULL,			NULL
2468 };
2469