xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision dcbf3bd6a1f1360fc1afcee9e22c6dcff7844bf2)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27#
28
29. ${STF_TOOLS}/contrib/include/logapi.shlib
30
31ZFS=${ZFS:-/usr/sbin/zfs}
32ZPOOL=${ZPOOL:-/usr/sbin/zpool}
33
34# Determine whether a dataset is mounted
35#
36# $1 dataset name
37# $2 filesystem type; optional - defaulted to zfs
38#
39# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
40
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means $1 is a mountpoint: compare it
			# against column 2 (mountpoint) of 'zfs mount';
			# otherwise compare against column 1 (dataset name).
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$($DF -F $fstype $1 2>/dev/null)
			ret=$?
			# df failure is propagated to the caller (the "2 on
			# error" case documented above).
			(($ret != 0)) && return $ret

			# Carve the mountpoint and the special device name out
			# of df output; assumes the classic
			# "<dir> (<name> ): ..." layout — TODO confirm for
			# all supported df variants.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
76
77# Return 0 if a dataset is mounted; 1 otherwise
78#
79# $1 dataset name
80# $2 filesystem type; optional - defaulted to zfs
81
# Thin wrapper around ismounted that collapses every non-mounted
# outcome (including errors) to a plain failure.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
88
89# Return 0 if a dataset is unmounted; 1 otherwise
90#
91# $1 dataset name
92# $2 filesystem type; optional - defaulted to zfs
93
# Succeed only when ismounted reports the explicit "unmounted" status (1);
# both "mounted" (0) and "error" (2) count as failure here.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
	ismounted $1 $2
	typeset status=$?
	if ((status == 1)); then
		return 0
	fi
	return 1
}
100
101# split line on ","
102#
103# $1 - line to split
104
# Split a comma-separated line into space-separated words.
#
# $1 - line to split
function splitline
{
	# Use shell pattern substitution instead of forking sed; behavior
	# for the intended comma-list inputs is identical.
	$ECHO "${1//,/ }"
}
109
110function default_setup
111{
112	default_setup_noexit "$@"
113
114	log_pass
115}
116
117#
118# Given a list of disks, setup storage pools and datasets.
119#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2	# non-empty: also create $TESTCTR/$TESTFS1
	typeset volume=$3	# non-empty: also create $TESTVOL

	if is_global_zone; then
		# Start from a clean slate: remove any leftover $TESTPOOL
		# and its stale root directory before recreating the pool.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		# In a local zone, remount the delegated datasets instead of
		# creating a pool.
		reexport_pool
	fi

	$RM -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container itself is never mounted (canmount=off);
		# only $TESTFS1 inside it gets a mountpoint.
		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			# No '-V' here: in a local zone a plain filesystem
			# of the same name is created instead.
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}
163
164#
165# Given a list of disks, setup a storage pool, file system and
166# a container.
167#
# Set up a pool, a filesystem and a container on the supplied disks,
# then exit with PASS (via default_setup).
function default_container_setup
{
	typeset disks=$1

	default_setup "$disks" "true"
}
174
175#
176# Given a list of disks, setup a storage pool,file system
177# and a volume.
178#
# Set up a pool, a filesystem and a volume on the supplied disks,
# then exit with PASS (via default_setup).
function default_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "" "true"
}
185
186#
187# Given a list of disks, setup a storage pool,file system,
188# a container and a volume.
189#
# Set up a pool, a filesystem, a container and a volume on the supplied
# disks, then exit with PASS (via default_setup).
function default_container_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "true" "true"
}
196
197#
# Create a snapshot on a filesystem or volume. By default the snapshot is
# created on a filesystem.
200#
201# $1 Existing filesystem or volume name. Default, $TESTFS
202# $2 snapshot name. Default, $TESTSNAP
203#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Fail early with clear messages rather than letting
	# 'zfs snapshot' produce its own error.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must $ZFS snapshot $fs_vol@$snap
}
220
221#
222# Create a clone from a snapshot, default clone name is $TESTCLONE.
223#
224# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
225# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
226#
# Clone a snapshot.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -n $snap ]] || log_fail "Snapshot name is undefined."
	[[ -n $clone ]] || log_fail "Clone name is undefined."

	log_must $ZFS clone $snap $clone
}
239
240function default_mirror_setup
241{
242	default_mirror_setup_noexit $1 $2 $3
243
244	log_pass
245}
246
247#
248# Given a pair of disks, set up a storage pool and dataset for the mirror
249# @parameters: $1 the primary side of the mirror
250#   $2 the secondary side of the mirror
251# @uses: ZPOOL ZFS TESTPOOL TESTFS
252function default_mirror_setup_noexit
253{
254	readonly func="default_mirror_setup_noexit"
255	typeset primary=$1
256	typeset secondary=$2
257
258	[[ -z $primary ]] && \
259		log_fail "$func: No parameters passed"
260	[[ -z $secondary ]] && \
261		log_fail "$func: No secondary partition passed"
262	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
263	log_must $ZPOOL create -f $TESTPOOL mirror $@
264	log_must $ZFS create $TESTPOOL/$TESTFS
265	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
266}
267
268#
269# create a number of mirrors.
270# We create a number($1) of 2 way mirrors using the pairs of disks named
271# on the command line. These mirrors are *not* mounted
272# @parameters: $1 the number of mirrors to create
273#  $... the devices to use to create the mirrors on
274# @uses: ZPOOL ZFS TESTPOOL
# Create $1 two-way mirror pools named $TESTPOOL<n> from successive
# pairs of devices on the command line. The pools are *not* mounted.
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i count=$1

	shift
	while ((count > 0)); do
		# Each iteration consumes one pair of devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && $RM -rf /$TESTPOOL$count
		log_must $ZPOOL create -f $TESTPOOL$count mirror $1 $2
		shift 2
		((count -= 1))
	done
}
288
289#
290# create a number of raidz pools.
291# We create a number($1) of 2 raidz pools  using the pairs of disks named
292# on the command line. These pools are *not* mounted
293# @parameters: $1 the number of pools to create
294#  $... the devices to use to create the pools on
295# @uses: ZPOOL ZFS TESTPOOL
# Create $1 two-disk raidz pools named $TESTPOOL<n> from successive
# pairs of devices on the command line. The pools are *not* mounted.
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i count=$1

	shift
	while ((count > 0)); do
		# Each iteration consumes one pair of devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && $RM -rf /$TESTPOOL$count
		log_must $ZPOOL create -f $TESTPOOL$count raidz $1 $2
		shift 2
		((count -= 1))
	done
}
309
310#
311# Destroy the configured testpool mirrors.
312# the mirrors are of the form ${TESTPOOL}{number}
313# @uses: ZPOOL ZFS TESTPOOL
314function destroy_mirrors
315{
316	default_cleanup_noexit
317
318	log_pass
319}
320
321#
322# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
323# $1 the list of disks
324#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	# Use the whole disk list; the old code passed only "$1 $2 $3",
	# silently ignoring any disks beyond the third.
	log_must $ZPOOL create -f $TESTPOOL raidz $disklist
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
341
342#
343# Common function used to cleanup storage pools and datasets.
344#
345# Invoked at the start of the test suite to ensure the system
346# is in a known state, and also at the end of each set of
347# sub-tests to ensure errors from one set of tests doesn't
348# impact the execution of the next set.
349
350function default_cleanup
351{
352	default_cleanup_noexit
353
354	log_pass
355}
356
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		$ZFS unmount -a > /dev/null 2>&1
		# Build an egrep pattern of pools which must survive cleanup.
		exclude=`eval $ECHO \"'(${KEEP})'\"`
		ALL_POOLS=$($ZPOOL list -H -o name \
		    | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		#
		# The list must be tested as one (possibly multi-word)
		# string: the previous '[ ! -z ${ALL_POOLS} ]' expanded
		# unquoted and failed with "too many arguments" whenever
		# more than one pool was eligible, silently skipping the
		# whole cleanup loop.
		while [[ -n $ALL_POOLS ]]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$($ZPOOL list -H -o name \
				    | $GREP -v "$NO_POOLS" \
				    | $EGREP -v "$exclude")
			done
		done

		$ZFS mount -a
	else
		typeset fs=""
		# A local zone cannot destroy pools; destroy the datasets
		# delegated to this zone instead.
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must $ZFS destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is off
				# or unknown — presumably encrypted datasets
				# constrain the checksum property; confirm.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must $RM -rf $TESTDIR
}
433
434
435#
436# Common function used to cleanup storage pools, file systems
437# and containers.
438#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount $TESTFS1 first, then recursively destroy it and the
	# container, skipping any step whose target is already gone.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the common pool/dataset cleanup (exits with PASS).
	default_cleanup
}
460
461#
462# Common function used to cleanup snapshot of file system or volume. Default to
463# delete the file system's snapshot
464#
465# $1 snapshot name
466#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# The mountpoint property may not reflect reality once the
	# snapshot is unmounted, so only look it up while the snapshot
	# is verifiably mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	# Remove the leftover mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}
491
492#
493# Common function used to cleanup clone.
494#
495# $1 clone name
496#
# Destroy a clone and clean up its mountpoint directory.
#
# $1 clone name; defaults to $TESTPOOL/$TESTCLONE
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	datasetexists $clone || log_fail "'$clone' does not existed."

	# As in destroy_snapshot: only trust the mountpoint property
	# while the clone is actually mounted.
	typeset mntpnt=""
	if ismounted $clone; then
		mntpnt=$(get_prop mountpoint $clone)
		(($? != 0)) && log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	if [[ -n $mntpnt && -d $mntpnt ]]; then
		log_must $RM -rf $mntpnt
	fi
}
517
518# Return 0 if a snapshot exists; $? otherwise
519#
520# $1 - snapshot name
521
# Return 0 if a snapshot exists; the 'zfs list' exit status otherwise.
#
# $1 - snapshot name
function snapexists
{
	# Function status is the status of the last command.
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
}
527
528#
529# Set a property to a certain value on a dataset.
530# Sets a property of the dataset to the value as passed in.
531# @param:
532#	$1 dataset who's property is being set
533#	$2 property to set
534#	$3 value to set property to
535# @return:
536#	0 if the property could be set.
537#	non-zero otherwise.
538# @use: ZFS
539#
# Set one property on a dataset, logging diagnostics on failure.
# @param: $1 dataset, $2 property, $3 value
# @return: 0 on success, the 'zfs set' status otherwise.
# @use: ZFS
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset status=$?
	((status == 0)) && return 0

	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $status"
	log_note "Output: $output"
	return $status
}
560
561#
562# Assign suite defined dataset properties.
563# This function is used to apply the suite's defined default set of
564# properties to a dataset.
565# @parameters: $1 dataset to use
566# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
567# @returns:
568#   0 if the dataset has been altered.
569#   1 if no pool name was passed in.
570#   2 if the dataset could not be found.
571#   3 if the dataset could not have it's properties set.
572#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# Scan the words of 'zfs list' output for the dataset name.
	# Header/other columns are iterated too, but only an exact
	# match counts.
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply only the properties the suite configuration defines.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
600
601#
602# Check a numeric assertion
603# @parameter: $@ the assertion to check
604# @output: big loud notice if assertion failed
605# @use: log_fail
606#
# Check a numeric assertion; on failure, report it loudly via log_fail.
# @parameter: $@ the arithmetic expression to check
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
611
612#
613# Function to format partition size of a disk
614# Given a disk cxtxdx reduces all partitions
615# to 0 size
616#
# Reduce every partition of the given cxtxdx disk to size 0.
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Slice 2 is skipped (left untouched by this loop).
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
627
628#
629# Given a slice, size and disk, this function
630# formats the slice to the specified size.
631# Size should be specified with units as per
632# the `format` command requirements eg. 100mb 3gb
633#
634function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
635{
636	typeset -i slicenum=$1
637	typeset start=$2
638	typeset size=$3
639	typeset disk=$4
640	[[ -z $slicenum || -z $size || -z $disk ]] && \
641	    log_fail "The slice, size or disk name is unspecified."
642	typeset format_file=/var/tmp/format_in.$$
643
644	$ECHO "partition" >$format_file
645	$ECHO "$slicenum" >> $format_file
646	$ECHO "" >> $format_file
647	$ECHO "" >> $format_file
648	$ECHO "$start" >> $format_file
649	$ECHO "$size" >> $format_file
650	$ECHO "label" >> $format_file
651	$ECHO "" >> $format_file
652	$ECHO "q" >> $format_file
653	$ECHO "q" >> $format_file
654
655	$FORMAT -e -s -d $disk -f $format_file
656	typeset ret_val=$?
657	$RM -f $format_file
658	[[ $ret_val -ne 0 ]] && \
659	    log_fail "Unable to format $disk slice $slicenum to $size"
660	return 0
661}
662
663#
664# Get the end cyl of the given slice
665#
# Get the end cyl of the given slice; prints nothing when the disk
# geometry cannot be determined.
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare cXtXdX device name.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, needed to convert sectors to cylinders.
	typeset -i ratio=0
	ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
		$GREP "sectors\/cylinder" | \
		$AWK '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# prtvtoc -h: field 6 of the matching slice row is its last sector.
	typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
		$NAWK -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	# Use $ECHO for consistency with the rest of this library
	# (the original used a bare 'echo' here).
	$ECHO $endcyl
}
693
694
695#
696# Given a size,disk and total slice number,  this function formats the
697# disk slices from 0 to the total slice number with the same specified
698# size.
699#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	# Wipe existing partitions first, then lay out equal-size slices.
	zero_partitions $disk_name
	while ((i < $total_slices)); do
		# Skip slice 2 (left untouched, as in zero_partitions).
		if ((i == 2)); then
			((i = i + 1))
			continue
		fi
		# Each slice starts at the end cylinder of the previous
		# one; $cyl is empty for the very first slice.
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
719
720#
721# This function continues to write to a filenum number of files into dirnum
722# number of directories until either $FILE_WRITE returns an error or the
723# maximum number of files per directory have been written.
724#
725# Usage:
726# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
727#
728# Return value: 0 on success
729#		non 0 on error
730#
731# Where :
732#	destdir:    is the directory where everything is to be created under
733#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
734#	filenum:    the maximum number of files per subdirectory
735#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
741#
742# Note: bytes * num_writes equals the size of the testfile
743#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# outer-loop sentinel; cleared to stop
	typeset -i idirnum=0	# index of the subdirectory being filled
	typeset -i fn=0		# index of the file within that subdirectory
	typeset -i retval=0

	log_must $MKDIR -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop at the directory limit (dirnum < 0 means no limit).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		$FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# A write failure also ends the run; its status becomes
		# the function's return value.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Roll over to a new subdirectory every $filenum files.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must $MKDIR -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
781
782#
783# Simple function to get the specified property. If unable to
784# get the property then exits.
785#
786# Note property is in 'parsable' format (-p)
787#
# Print the parsable (-p) value of one property of a dataset.
# Returns 1 (after a note) when the property cannot be read.
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}
804
805#
806# Simple function to get the specified property of pool. If unable to
807# get the property then exits.
808#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# The value is the third column of the last line of
		# 'zpool get' output.
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
			$AWK '{print $3}')
		# NOTE(review): $? here is the status of the awk stage of
		# the pipeline, not of zpool, so this check rarely fires.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	$ECHO $prop_val
	return 0
}
831
832# Return 0 if a pool exists; $? otherwise
833#
834# $1 - pool name
835
# Return 0 if a pool exists; the 'zpool get' status otherwise.
#
# $1 - pool name
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Function status is the status of the last command.
	$ZPOOL get name "$pool" > /dev/null 2>&1
}
848
849# Return 0 if all the specified datasets exist; $? otherwise
850#
851# $1-n  dataset name
# Return 0 if every named dataset exists; otherwise the failing
# 'zfs get' status.
#
# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		$ZFS get name $ds > /dev/null 2>&1 || return $?
	done

	return 0
}
867
868# return 0 if none of the specified datasets exists, otherwise return 1.
869#
870# $1-n  dataset name
# Return 0 if none of the named datasets exists; 1 as soon as one does.
#
# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		$ZFS list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1 && return 1
	done

	return 0
}
886
887#
888# Given a mountpoint, or a dataset name, determine if it is shared.
889#
890# Returns 0 if shared, 1 otherwise.
891#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is translated to its mountpoint;
	# datasets with no usable mountpoint can never be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# share output lists the shared path in its second column.
	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; note the NFS server state as a debugging aid.
	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
924
925#
926# Given a mountpoint, determine if it is not shared.
927#
928# Returns 0 if not shared, 1 otherwise.
929#
# Invert is_shared: succeed when the mountpoint is NOT shared.
#
# Returns 0 if not shared, 1 otherwise.
function not_shared
{
	typeset fs=$1

	if is_shared $fs; then
		return 1
	fi

	return 0
}
941
942#
943# Helper function to unshare a mountpoint.
944#
# Unshare a mountpoint, but only if it is currently shared.
function unshare_fs #fs
{
	typeset fs=$1

	if is_shared $fs; then
		log_must $ZFS unshare $fs
	fi

	return 0
}
956
957#
958# Check NFS server status and trigger it online.
959#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real share operation brings the NFS server
		# online permanently, so share a scratch directory.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the fmri's status to settle. While in
		# transition an asterisk (*) is appended to the instance
		# status, and unsharing then would revert it to 'DIS'.
		#
		# Wait at least one second.
		#
		log_must $SLEEP 1
		timeout=10
		# Note: the bare word 'timeout' is evaluated as a variable
		# by the arithmetic comparison inside [[ ... ]].
		while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			((timeout -= 1))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}
1006
1007#
1008# To verify whether calling process is in global zone
1009#
1010# Return 0 if in global zone, 1 in non-global zone
1011#
# Succeed (return 0) only when the calling process runs in the global
# zone; any other zonename — or a zonename lookup failure — returns 1.
function is_global_zone
{
	[[ $($ZONENAME 2>/dev/null) == "global" ]]
}
1020
1021#
1022# Verify whether test is permitted to run from
1023# global zone, local zone, or both
1024#
1025# $1 zone limit, could be "global", "local", or "both"(no limit)
1026#
1027# Return 0 if permitted, otherwise exit with log_unsupported
1028#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: the test may always run.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# In a local zone, also remount the delegated test
		# datasets before the test proceeds.
		reexport_pool
	fi

	return 0
}
1063
1064# Return 0 if create successfully or the pool exists; $? otherwise
1065# Note: In local zones, this function should return 0 silently.
1066#
1067# $1 - pool name
1068# $2-n - [keyword] devs_list
1069
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}	# strip any dataset part; keep the pool name

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch: tear down any pre-existing pool of the
	# same name first.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Pool creation only happens in the global zone; in a local zone
	# this silently succeeds (see the function comment above).
	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}
1092
# Return 0 if the pool is destroyed successfully; non-zero otherwise
1094# Note: In local zones, this function should return 0 silently.
1095#
1096# $1 - pool name
1097# Destroy pool with the given parameters.
1098
function destroy_pool #pool
{
	typeset pool=${1%%/*}	# strip any dataset part; keep the pool name
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before the pool goes away.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			# 'must' stays empty for the first 7 tries; after
			# that it becomes log_must so a further failure
			# aborts the test. (Note: it is not typeset-local.)
			must=""
			while [[ $ret -ne 0 ]]; do
				$must $ZPOOL destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				$SLEEP $wait_time
			done

			# Remove the leftover mountpoint directory.
			[[ -d $mtpt ]] && \
				log_must $RM -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1138
1139#
1140# Firstly, create a pool with 5 datasets. Then, create a single zone and
1141# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1142# and a zvol device to the zone.
1143#
1144# $1 zone name
1145# $2 zone root directory prefix
1146# $3 zone ip
1147#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): zone_ip is accepted but never used below.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must $MKFILE 100M $sdevs
		log_must $ZPOOL add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	# Ensure an empty, private zone root directory exists.
	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the containers created above to the zone.
	while ((i < cntctr)); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO  "terminal=dtterm" >> $sysidcfg
	$ECHO  "network_interface=primary {" >> $sysidcfg
	$ECHO  "hostname=$zone_name" >> $sysidcfg
	$ECHO  "}" >> $sysidcfg
	$ECHO  "name_service=NONE" >> $sysidcfg
	$ECHO  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO  "security_policy=NONE" >> $sysidcfg
	$ECHO  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}
1251
1252#
1253# Reexport TESTPOOL & TESTPOOL(1-4)
1254#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			# The first container becomes the default $TESTPOOL.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			# The rest become $TESTPOOL1..$TESTPOOL4; eval is
			# needed to assign/dereference the computed names.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1275
1276#
1277# Verify a given disk is online or offline
1278#
# Return 0 if pool/disk matches expected state, 1 otherwise
1280#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# The disk matches when some 'zpool status' line mentions both
	# the device and (case-insensitively) the expected state.
	$ZPOOL status -v $pool | grep "$disk" | \
	    grep -i "$state" > /dev/null 2>&1
}
1292
1293#
1294# Get the mountpoint of snapshot
1295# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1296# as its mountpoint
1297#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@' separating fs and snap parts.
	case $dataset in
	*@*)	;;
	*)	log_fail "Error name of snapshot '$dataset'." ;;
	esac

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both halves must be non-empty ('@snap' or 'fs@' are invalid).
	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1315
1316#
1317# Given a pool and file system, this function will verify the file system
1318# using the zdb internal tool. Note that the pool is exported and imported
1319# to ensure it has consistent state.
1320#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	# Export the pool first so zdb examines a quiesced, consistent
	# on-disk state.
	$ZFS unmount -a > /dev/null 2>&1
	log_must $ZPOOL export $pool

	# Extra directories ($3...) become '-d' device search paths for
	# 'zpool import' (needed for file-backed pools).
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $ZFS mount -a
	log_must $RM -rf $zdbout
}
1354
1355#
1356# Given a pool, and this function list all disks in the pool
1357#
1358function get_disklist # pool
1359{
1360	typeset disklist=""
1361
1362	disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1363	    $GREP -v "\-\-\-\-\-" | \
1364	    $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1365
1366	$ECHO $disklist
1367}
1368
1369# /**
1370#  This function kills a given list of processes after a time period. We use
1371#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1372#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1373#  would be listed as FAIL, which we don't want : we're happy with stress tests
1374#  running for a certain amount of time, then finishing.
1375#
1376# @param $1 the time in seconds after which we should terminate these processes
1377# @param $2..$n the processes we wish to terminate.
1378# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	# Only signal processes that are still alive.
	for pid in $cpids; do
		if $PS -p $pid > /dev/null 2>&1; then
			log_must $KILL -USR1 $pid
		fi
	done
}
1398
1399#
1400# Verify a given hotspare disk is inuse or avail
1401#
# Return 0 if pool/disk matches expected state, 1 otherwise
1403#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Read the device's state from the 'spares' section of
	# 'zpool status' and compare with the expected value.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == "$cur_state" ]]
}
1417
1418#
1419# Verify a given slog disk is inuse or avail
1420#
# Return 0 if pool/disk matches expected state, 1 otherwise
1422#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Read the device's state from the 'logs' section of
	# 'zpool status' and compare with the expected value.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == "$cur_state" ]]
}
1436
1437#
1438# Verify a given vdev disk is inuse or avail
1439#
# Return 0 if pool/disk matches expected state, 1 otherwise
1441#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Read the device's state from the main pool config section of
	# 'zpool status' and compare with the expected value.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == "$cur_state" ]]
}
1455
1456#
1457# Check the output of 'zpool status -v <pool>',
1458# and to see if the content of <token> contain the <keyword> specified.
1459#
# Return 0 if the token contains the keyword, 1 otherwise
1461#
1462function check_pool_status # pool token keyword
1463{
1464	typeset pool=$1
1465	typeset token=$2
1466	typeset keyword=$3
1467
1468	$ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1469		($1==token) {print $0}' \
1470	| $GREP -i "$keyword" > /dev/null 2>&1
1471
1472	return $?
1473}
1474
1475#
1476# These 5 following functions are instance of check_pool_status()
1477#	is_pool_resilvering - to check if the pool is resilver in progress
1478#	is_pool_resilvered - to check if the pool is resilver completed
1479#	is_pool_scrubbing - to check if the pool is scrub in progress
1480#	is_pool_scrubbed - to check if the pool is scrub completed
1481#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1482#
function is_pool_resilvering #pool
{
	# The 'scan:' line reports an in-progress resilver verbatim.
	check_pool_status "$1" "scan" "resilver in progress since "
}
1488
function is_pool_resilvered #pool
{
	# A completed resilver shows "resilvered " on the 'scan:' line.
	check_pool_status "$1" "scan" "resilvered "
}
1494
function is_pool_scrubbing #pool
{
	# The 'scan:' line reports an in-progress scrub verbatim.
	check_pool_status "$1" "scan" "scrub in progress since "
}
1500
function is_pool_scrubbed #pool
{
	# A completed scrub shows "scrub repaired" on the 'scan:' line.
	check_pool_status "$1" "scan" "scrub repaired"
}
1506
function is_pool_scrub_stopped #pool
{
	# A cancelled scrub shows "scrub canceled" on the 'scan:' line.
	check_pool_status "$1" "scan" "scrub canceled"
}
1512
1513#
# Use create_pool()/destroy_pool() to clean up the information
# in the given disk to avoid slice overlapping.
1516#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# A leftover scratch pool from a prior run would block creation.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a throwaway pool relabels the devices,
	# clearing any stale slice information.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1530
1531#
1532# Verify the rsh connectivity to each remote host in RHOSTS.
1533#
1534# Return 0 if remote host is accessible; otherwise 1.
1535# $1 remote host name
1536# $2 username
1537#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	# The host must at least resolve in the hosts database.
	if ! $GETENT hosts $rhost >/dev/null 2>&1; then
		log_note "$rhost cannot be found from" \
			"administrative database."
		return 1
	fi

	# ... and be reachable on the network.
	if ! $PING $rhost 3 >/dev/null 2>&1; then
		log_note "$rhost is not reachable."
		return 1
	fi

	# Pick the remote identity: an explicit user, or whoever we are.
	if ((${#username} != 0)); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`$LOGNAME`\""
	fi

	# Finally, prove a trivial remote command actually runs.
	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}
1573
1574#
1575# Verify the remote host connection via rsh after rebooting
1576# $1 remote host
1577#
function verify_remote
{
	rhost=$1

	#
	# Poll the remote host while it reboots: up to 5 retries spaced
	# 150 seconds apart, i.e. roughly 12.5 minutes in total.
	#
	typeset -i count=0
	until verify_rsh_connect $rhost; do
		sleep 150
		((count = count + 1))
		if ((count > 5)); then
			return 1
		fi
	done
	return 0
}
1599
1600#
1601# Replacement function for /usr/bin/rsh. This function will include
1602# the /usr/bin/rsh and meanwhile return the execution status of the
1603# last command.
1604#
1605# $1 usrname passing down to -l option of /usr/bin/rsh
1606# $2 remote machine hostname
1607# $3... command string
1608#
1609
function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=/tmp/${rhost}.$$.err
	if ((${#ruser} == 0)); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	# rsh does not propagate the remote exit status, so append it to
	# the remote stderr and recover it from the captured file below.
	$rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (($ret != 0)); then
		$CAT $err_file
		# Fixed: previously also removed undefined "$std_file".
		$RM -f $err_file
		log_fail  "$RSH itself failed with exit code $ret..."
	fi

	# Extract the remote command's status from the captured stderr.
	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(($ret != 0)) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}
1645
1646#
1647# Get the SUNWstc-fs-zfs package installation path in a remote host
1648# $1 remote host name
1649#
1650function get_remote_pkgpath
1651{
1652	typeset rhost=$1
1653	typeset pkgpath=""
1654
1655	pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1656			$CUT -d: -f2")
1657
1658	$ECHO $pkgpath
1659}
1660
1661#/**
1662# A function to find and locate free disks on a system or from given
1663# disks as the parameter. It works by locating disks that are in use
1664# as swap devices and dump devices, and also disks listed in /etc/vfstab
1665#
1666# $@ given disks to find which are free, default is all disks in
1667# the test system
1668#
1669# @return a string containing the list of available disks
1670#*/
1671function find_disks
1672{
1673	sfi=/tmp/swaplist.$$
1674	dmpi=/tmp/dumpdev.$$
1675	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1676
1677	$SWAP -l > $sfi
1678	$DUMPADM > $dmpi 2>/dev/null
1679
1680# write an awk script that can process the output of format
1681# to produce a list of disks we know about. Note that we have
1682# to escape "$2" so that the shell doesn't interpret it while
1683# we're creating the awk script.
1684# -------------------
1685	$CAT > /tmp/find_disks.awk <<EOF
1686#!/bin/nawk -f
1687	BEGIN { FS="."; }
1688
1689	/^Specify disk/{
1690		searchdisks=0;
1691	}
1692
1693	{
1694		if (searchdisks && \$2 !~ "^$"){
1695			split(\$2,arr," ");
1696			print arr[1];
1697		}
1698	}
1699
1700	/^AVAILABLE DISK SELECTIONS:/{
1701		searchdisks=1;
1702	}
1703EOF
1704#---------------------
1705
1706	$CHMOD 755 /tmp/find_disks.awk
1707	disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
1708	$RM /tmp/find_disks.awk
1709
1710	unused=""
1711	for disk in $disks; do
1712	# Check for mounted
1713		$GREP "${disk}[sp]" /etc/mnttab >/dev/null
1714		(($? == 0)) && continue
1715	# Check for swap
1716		$GREP "${disk}[sp]" $sfi >/dev/null
1717		(($? == 0)) && continue
1718	# check for dump device
1719		$GREP "${disk}[sp]" $dmpi >/dev/null
1720		(($? == 0)) && continue
1721	# check to see if this disk hasn't been explicitly excluded
1722	# by a user-set environment variable
1723		$ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
1724		(($? == 0)) && continue
1725		unused_candidates="$unused_candidates $disk"
1726	done
1727	$RM $sfi
1728	$RM $dmpi
1729
1730# now just check to see if those disks do actually exist
1731# by looking for a device pointing to the first slice in
1732# each case. limit the number to max_finddisksnum
1733	count=0
1734	for disk in $unused_candidates; do
1735		if [ -b /dev/dsk/${disk}s0 ]; then
1736		if [ $count -lt $max_finddisksnum ]; then
1737			unused="$unused $disk"
1738			# do not impose limit if $@ is provided
1739			[[ -z $@ ]] && ((count = count + 1))
1740		fi
1741		fi
1742	done
1743
1744# finally, return our disk list
1745	$ECHO $unused
1746}
1747
1748#
1749# Add specified user to specified group
1750#
1751# $1 group name
1752# $2 user name
1753# $3 base of the homedir (optional)
1754#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	# Both a group and a user name are mandatory.
	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	# Create the account with a home directory under $basedir.
	log_must $USERADD -g $gname -d $basedir/$uname -m $uname

	return 0
}
1769
1770#
1771# Delete the specified user.
1772#
1773# $1 login name
1774# $2 base of the homedir (optional)
1775#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -n $user ]] || log_fail "login name is necessary."

	# Only remove the account if it actually exists.
	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	# Clean up the home directory created by add_user().
	if [[ -d $basedir/$user ]]; then
		$RM -fr $basedir/$user
	fi

	return 0
}
1793
1794#
1795# Select valid gid and create specified group.
1796#
1797# $1 group name
1798#
function add_group #<group_name>
{
	typeset group=$1

	[[ -n $group ]] || log_fail "group name is necessary."

	# Try gids starting at 100 until groupadd stops failing with
	# exit code 4 ("gid is not unique").
	typeset -i gid=100
	typeset -i rc
	while :; do
		$GROUPADD -g $gid $group > /dev/null 2>&1
		rc=$?
		case $rc in
			0) return 0 ;;
			# The gid is not  unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1820
1821#
1822# Delete the specified group.
1823#
1824# $1 group name
1825#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -n $grp ]] || log_fail "group name is necessary."

	# Renaming the group to itself is a cheap existence probe.
	$GROUPMOD -n $grp $grp > /dev/null 2>&1
	case $? in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1845
1846#
1847# This function will return true if it's safe to destroy the pool passed
1848# as argument 1. It checks for pools based on zvols and files, and also
1849# files contained in a pool that may have a different mountpoint.
1850#
1851function safe_to_destroy_pool { # $1 the pool name
1852
1853	typeset pool=""
1854	typeset DONT_DESTROY=""
1855
1856	# We check that by deleting the $1 pool, we're not
1857	# going to pull the rug out from other pools. Do this
1858	# by looking at all other pools, ensuring that they
1859	# aren't built from files or zvols contained in this pool.
1860
1861	for pool in $($ZPOOL list -H -o name)
1862	do
1863		ALTMOUNTPOOL=""
1864
1865		# this is a list of the top-level directories in each of the
1866		# files that make up the path to the files the pool is based on
1867		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
1868			$AWK '{print $1}')
1869
1870		# this is a list of the zvols that make up the pool
1871		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/dsk/$1$" \
1872		    | $AWK '{print $1}')
1873
1874		# also want to determine if it's a file-based pool using an
1875		# alternate mountpoint...
1876		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
1877					$GREP / | $AWK '{print $1}' | \
1878					$AWK -F/ '{print $2}' | $GREP -v "dev")
1879
1880		for pooldir in $POOL_FILE_DIRS
1881		do
1882			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
1883					$GREP "${pooldir}$" | $AWK '{print $1}')
1884
1885			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1886		done
1887
1888
1889		if [ ! -z "$ZVOLPOOL" ]
1890		then
1891			DONT_DESTROY="true"
1892			log_note "Pool $pool is built from $ZVOLPOOL on $1"
1893		fi
1894
1895		if [ ! -z "$FILEPOOL" ]
1896		then
1897			DONT_DESTROY="true"
1898			log_note "Pool $pool is built from $FILEPOOL on $1"
1899		fi
1900
1901		if [ ! -z "$ALTMOUNTPOOL" ]
1902		then
1903			DONT_DESTROY="true"
1904			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
1905		fi
1906	done
1907
1908	if [ -z "${DONT_DESTROY}" ]
1909	then
1910		return 0
1911	else
1912		log_note "Warning: it is not safe to destroy $1!"
1913		return 1
1914	fi
1915}
1916
1917#
1918# Get the available ZFS compression options
1919# $1 option type zfs_set|zfs_compress
1920#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# 'zfs_set' additionally allows turning compression off.
	case $1 in
	zfs_compress)	COMPRESS_OPTS="on lzjb" ;;
	zfs_set)	COMPRESS_OPTS="on off lzjb" ;;
	esac
	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this build advertises gzip.
	if $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	$ECHO "$valid_opts"
}
1939
1940#
1941# Verify zfs operation with -p option work as expected
1942# $1 operation, value could be create, clone or rename
1943# $2 dataset type, value could be fs or vol
1944# $3 dataset name
1945# $4 new dataset name
1946#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only a target name; volumes also
			# need a size argument spliced into the ops string.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must $ZFS destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot $ZFS $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must $ZFS $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must $ZFS $ops -p $dataset $newdataset
	fi

	return 0
}
2010
2011#
2012# Get configuration of pool
2013# $1 pool name
2014# $2 config name
2015#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Last column of 'zpool list -H' is the altroot; "-" means none.
	# An exported/altroot pool needs 'zdb -e' instead of 'zdb -C'.
	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	else
		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip a leading/trailing quote character from the zdb
		# output.  NOTE(review): this relies on ksh treating the
		# bare ' inside ${...} literally — confirm before porting
		# to another shell.
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2041
2042#
# Private function. Randomly select one of the items from the arguments.
2044#
2045# $1 count
2046# $2-n string
2047#
function _random_get
{
	typeset count=$1
	shift

	typeset items="$@"
	typeset -i pick

	# Choose a 1-based field index at random.
	((pick = RANDOM % count + 1))

	$ECHO $($ECHO "$items" | $CUT -f $pick -d ' ')
}
2060
2061#
# Randomly select one item from the arguments, with an extra NONE (empty) choice
2063#
function random_get_with_non
{
	typeset -i cnt=$#

	# One extra slot past the last argument lets _random_get land on
	# an empty ("NONE") field.  Fixed: "((cnt =+ 1))" assigned +1 to
	# cnt instead of incrementing it, so the random index was always
	# 1 and only the first argument could ever be returned.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2071
2072#
# Randomly select one item from the arguments, with no NONE (empty) choice
2074#
function random_get
{
	# Every argument is a candidate; no empty slot is added.
	typeset -i nargs=$#
	_random_get "$nargs" "$@"
}
2079
2080#
2081# Detect if the current system support slog
2082#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	# Two small file vdevs: one data, one separate log.
	$MKDIR -p $dir
	$MKFILE 64M $vdev $sdev

	# A dry-run ('-n') pool creation with a log vdev succeeds only
	# when the running bits support slogs.
	typeset -i rc=0
	$ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1 || rc=1

	$RM -r $dir
	return $rc
}
2101
2102#
2103# The function will generate a dataset name with specific length
2104# $1, the length of the name
2105# $2, the base string to construct the name
2106#
function gen_dataset_name
{
	typeset -i want=$1
	typeset base="$2"
	typeset -i blen=${#base}
	typeset -i reps
	typeset result=""

	# Repeat the base string enough times to reach (or slightly
	# exceed) the requested length; the result is not truncated.
	if ((want % blen == 0)); then
		((reps = want / blen))
	else
		((reps = want / blen + 1))
	fi
	while ((reps > 0)); do
		result="${result}${base}"
		((reps -= 1))
	done

	$ECHO $result
}
2128
2129#
2130# Get cksum tuple of dataset
2131# $1 dataset name
2132#
2133# sample zdb output:
2134# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2135# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2136# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2137# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2138function datasetcksum
2139{
2140	typeset cksum
2141	$SYNC
2142	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2143		| $AWK -F= '{print $7}')
2144	$ECHO $cksum
2145}
2146
2147#
2148# Get cksum of file
# $1 file path
2150#
function checksum
{
	# First field of cksum(1) output is the file's CRC.
	$CKSUM $1 | $AWK '{print $1}'
}
2157
2158#
2159# Get the given disk/slice state from the specific field of the pool
2160#
2161function get_device_state #pool disk field("", "spares","logs")
2162{
2163	typeset pool=$1
2164	typeset disk=${2#/dev/dsk/}
2165	typeset field=${3:-$pool}
2166
2167	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2168		$NAWK -v device=$disk -v pool=$pool -v field=$field \
2169		'BEGIN {startconfig=0; startfield=0; }
2170		/config:/ {startconfig=1}
2171		(startconfig==1) && ($1==field) {startfield=1; next;}
2172		(startfield==1) && ($1==device) {print $2; exit;}
2173		(startfield==1) &&
2174		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2175	echo $state
2176}
2177
2178
2179#
2180# print the given directory filesystem type
2181#
2182# $1 directory name
2183#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' reports "<mountpoint> : <fstype>"; the filesystem
	# type is the third whitespace-separated field, e.g.:
	#  $ df -n /
	#  /		  : ufs
	#
	$DF -n $dir | $AWK '{print $3}'
}
2198
2199#
2200# Given a disk, label it to VTOC regardless what label was on the disk
2201# $1 disk
2202#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$($UNAME -p)

	# Build a format(1M) command script appropriate for the
	# architecture: x86 additionally needs an fdisk partition.
	if [[ $arch == "i386" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
		$ECHO "q" >> $label_file

		$FDISK -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		$SLEEP 60
	elif [[ $arch == "sparc" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Feed the script to format non-interactively ('-s') and label
	# the disk with a VTOC label.
	$FORMAT -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	$RM -f $label_file
	#
	# wait the format to finish
	#
	$SLEEP 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2246
2247#
2248# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise 1
2250#
2251function is_zfsroot
2252{
2253	$DF -n / | $GREP zfs > /dev/null 2>&1
2254	return $?
2255}
2256
2257#
2258# get the root filesystem name if it's zfsroot system.
2259#
2260# return: root filesystem name
function get_rootfs
{
	# Find the dataset mounted at '/' with type zfs in mnttab.
	typeset rootfs=""
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Confirm the mnttab entry really is a ZFS dataset.
	if $ZFS list $rootfs > /dev/null 2>&1; then
		$ECHO $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2276
2277#
2278# get the rootfs's pool name
2279# return:
2280#       rootpool name
2281#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool name is the first '/'-separated component of
		# the dataset name.  (Replaces a backticked bare 'awk'
		# pipeline, which was inconsistent with this library's
		# $(...)/$AWK convention and forked needlessly.)
		rootpool=${rootfs%%/*}
		$ECHO $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2299
2300#
2301# Check if the given device is physical device
2302#
function is_physical_device #device
{
	# Strip any /dev/dsk or /dev/rdsk prefix first.
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# Physical devices look like c<N>t<N>d<N> (hex digits allowed,
	# no slice/partition suffix).
	$ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2311
2312#
2313# Get the directory path of given device
2314#
function get_device_dir #device
{
	typeset device=$1

	# Physical devices (cXtXdX) always live in /dev/dsk; for any
	# other path, return its directory component.
	#
	# Fixed: the original used "if ! $(is_physical_device ...)",
	# which runs the function's (empty) stdout via command
	# substitution and only worked by accident; call it directly.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		$ECHO $device
	else
		$ECHO "/dev/dsk"
	fi
}
2328
2329#
2330# Get the package name
2331#
2332function get_package_name
2333{
2334	typeset dirpath=${1:-$STC_NAME}
2335
2336	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2337}
2338
2339#
2340# Get the word numbers from a string separated by white space
2341#
2342function get_word_count
2343{
2344	$ECHO $1 | $WC -w
2345}
2346
2347#
2348# To verify if the require numbers of disks is given
2349#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Enough disks supplied: nothing to do.
	((count >= min)) && return 0

	log_untested "A minimum of $min disks is required to run." \
		" You specified $count disk(s)"
}
2361
function ds_is_volume
{
	# True when the dataset's 'type' property is 'volume'.
	[[ $(get_prop type $1) == "volume" ]]
}
2368
function ds_is_filesystem
{
	# True when the dataset's 'type' property is 'filesystem'.
	[[ $(get_prop type $1) == "filesystem" ]]
}
2375
function ds_is_snapshot
{
	# True when the dataset's 'type' property is 'snapshot'.
	[[ $(get_prop type $1) == "snapshot" ]]
}
2382
2383#
2384# Check if Trusted Extensions are installed and enabled
2385#
function is_te_enabled
{
	# Trusted Extensions are active iff the labeld service reports
	# the 'enabled' state.
	$SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
}
2395
2396# Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# True when psrinfo reports more than one processor line.
	(($($PSRINFO | $WC -l) > 1))
}
2401
2402function get_cpu_freq
2403{
2404	$PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
2405}
2406
2407# Run the given command as the user provided.
2408function user_run
2409{
2410	typeset user=$1
2411	shift
2412
2413	eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
2414	return $?
2415}
2416
2417#
2418# Check if the pool contains the specified vdevs
2419#
2420# $1 pool
2421# $2..n <vdev> ...
2422#
2423# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2424# vdevs is not in the pool, and 2 if pool name is missing.
2425#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$($MKTEMP)
	$ZPOOL list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		# Match the vdev's basename as a whole word in the config.
		if ! $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Fixed: the temp file used to leak on this path.
			$RM -f $tmpfile
			return 1
		fi
	done

	$RM -f $tmpfile

	return 0
}
2449
# Print the largest of the integer arguments.
function get_max
{
	# Fixed: was "typeset -l" (lowercase conversion), almost
	# certainly a typo for "-i"; also dropped the pointless
	# "$(echo $((...)))" subshell forked on every iteration.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		((i > max)) && max=$i
	done

	echo $max
}
2461
# Print the smallest of the integer arguments.
function get_min
{
	# Fixed: was "typeset -l" (lowercase conversion), almost
	# certainly a typo for "-i"; also dropped the pointless
	# "$(echo $((...)))" subshell forked on every iteration.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		((i < min)) && min=$i
	done

	echo $min
}
2473