xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision e153cda9f9660e385e8f468253f80e59f5d454d7)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31
32# Determine whether a dataset is mounted
33#
34# $1 dataset name
35# $2 filesystem type; optional - defaulted to zfs
36#
37# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
38
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret col

	case $fstype in
		zfs)
			# A leading '/' means we were given a mountpoint
			# (column 2 of 'zfs mount'); otherwise match the
			# dataset name (column 1).
			col=1
			[[ "$1" == "/"* ]] && col=2
			for out in $(zfs mount | awk -v c=$col '{print $c}'); do
				[[ $1 == $out ]] && return 0
			done
		;;
		ufs|nfs)
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			((ret != 0)) && return $ret

			# df prints "<dir> (<name>) ..."; carve out both the
			# mount directory and the device/resource name.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			# The argument may be either side of the mount.
			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
74
75# Return 0 if a dataset is mounted; 1 otherwise
76#
77# $1 dataset name
78# $2 filesystem type; optional - defaulted to zfs
79
function mounted
{
	# Thin predicate wrapper: success iff ismounted reports mounted.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
86
87# Return 0 if a dataset is unmounted; 1 otherwise
88#
89# $1 dataset name
90# $2 filesystem type; optional - defaulted to zfs
91
function unmounted
{
	typeset -i rc

	# Success only for the explicit "unmounted" status (1); any other
	# ismounted result (mounted, or an error code) is a failure here.
	ismounted $1 $2
	rc=$?
	((rc == 1)) && return 0
	return 1
}
98
99# split line on ","
100#
101# $1 - line to split
102
function splitline
{
	# Translate every comma to a space on the way through.
	echo $1 | tr ',' ' '
}
107
function default_setup
{
	# Delegate all the work, then record the test as passed.
	default_setup_noexit "$@"
	log_pass
}
114
115#
116# Given a list of disks, setup storage pools and datasets.
117#
function default_setup_noexit
{
	typeset disks=$1
	typeset ctr=$2
	typeset vol=$3

	# In the global zone build the pool from scratch; local zones can
	# only re-export what the global zone already delegated.
	if is_global_zone; then
		poolexists $TESTPOOL && destroy_pool $TESTPOOL
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disks
	else
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optionally add a (non-mountable) container with a child fs.
	if [[ -n $ctr ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optionally add a volume; zvols only exist in the global zone.
	if [[ -n $vol ]]; then
		if is_global_zone; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
161
162#
163# Given a list of disks, setup a storage pool, file system and
164# a container.
165#
function default_container_setup
{
	typeset disks=$1

	# Pool + filesystem + container (no volume).
	default_setup "$disks" "true"
}
172
173#
174# Given a list of disks, setup a storage pool,file system
175# and a volume.
176#
function default_volume_setup
{
	typeset disks=$1

	# Pool + filesystem + volume (no container).
	default_setup "$disks" "" "true"
}
183
184#
185# Given a list of disks, setup a storage pool,file system,
186# a container and a volume.
187#
function default_container_volume_setup
{
	typeset disks=$1

	# Pool + filesystem + container + volume.
	default_setup "$disks" "true" "true"
}
194
195#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
198#
199# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
200# $2 snapshot name. Default, $TESTSNAP
201#
function create_snapshot
{
	typeset ds=${1:-$TESTPOOL/$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	if [[ -z $ds ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	# Refuse to clobber an existing snapshot; the source must exist.
	snapexists $ds@$snapname && \
		log_fail "$ds@$snapname already exists."
	datasetexists $ds || \
		log_fail "$ds must exist."

	log_must zfs snapshot $ds@$snapname
}
218
219#
220# Create a clone from a snapshot, default clone name is $TESTCLONE.
221#
222# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
223# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
224#
function create_clone   # snapshot clone
{
	typeset src=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset dst=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $src ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $dst ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $src $dst
}
237
238#
# Create a bookmark of the given snapshot.  By default, create a bookmark on
# the filesystem.
241#
242# $1 Existing filesystem or volume name. Default, $TESTFS
243# $2 Existing snapshot name. Default, $TESTSNAP
244# $3 bookmark name. Default, $TESTBKMARK
245#
function create_bookmark
{
	typeset ds=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset mark=${3:-$TESTBKMARK}

	if [[ -z $ds ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi
	if [[ -z $mark ]]; then
		log_fail "Bookmark's name is undefined."
	fi

	# The bookmark must be new; both the dataset and the snapshot it is
	# taken from must already exist.
	bkmarkexists $ds#$mark && \
		log_fail "$ds#$mark already exists."
	datasetexists $ds || \
		log_fail "$ds must exist."
	snapexists $ds@$snapname || \
		log_fail "$ds@$snapname must exist."

	log_must zfs bookmark $ds@$snapname $ds#$mark
}
266
267#
268# Create a temporary clone result of an interrupted resumable 'zfs receive'
269# $1 Destination filesystem name. Must not exist, will be created as the result
270#    of this function along with its %recv temporary clone
271# $2 Source filesystem name. Must not exist, will be created and destroyed
272#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Build a scratch sender with two snapshots and seed $recvfs from
	# the first full stream.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10k via dd so the receive
	# cannot complete; 'recv -s' then preserves the partial state.
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	# The scratch sender and stream file are no longer needed.
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# An interrupted resumable receive leaves $recvfs/%recv with the
	# 'inconsistent' property set to 1.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
301
function default_mirror_setup
{
	# Do the work, then record the test as passed.
	default_mirror_setup_noexit $1 $2 $3
	log_pass
}
308
309#
310# Given a pair of disks, set up a storage pool and dataset for the mirror
311# @parameters: $1 the primary side of the mirror
312#   $2 the secondary side of the mirror
313# @uses: ZPOOL ZFS TESTPOOL TESTFS
#
# Given a pair of disks, set up a storage pool and dataset for the mirror.
# $1 the primary side of the mirror
# $2 the secondary side of the mirror
# Any additional arguments are passed through to 'zpool create' as extra
# mirror members.
#
function default_mirror_setup_noexit
{
	# Was 'readonly func=...': readonly in a function creates a global
	# read-only variable, so a second invocation in the same shell would
	# fail on the re-assignment. A plain typeset keeps it local.
	typeset func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
329
330#
331# create a number of mirrors.
332# We create a number($1) of 2 way mirrors using the pairs of disks named
333# on the command line. These mirrors are *not* mounted
334# @parameters: $1 the number of mirrors to create
335#  $... the devices to use to create the mirrors on
336# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i n=$1

	shift
	# Consume the disk list two at a time, one 2-way mirror per pool.
	while ((n > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$n ]] && rm -rf /$TESTPOOL$n
		log_must zpool create -f $TESTPOOL$n mirror $1 $2
		shift 2
		((n -= 1))
	done
}
350
351#
352# create a number of raidz pools.
353# We create a number($1) of 2 raidz pools  using the pairs of disks named
354# on the command line. These pools are *not* mounted
355# @parameters: $1 the number of pools to create
356#  $... the devices to use to create the pools on
357# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i n=$1

	shift
	# Consume the disk list two at a time, one 2-disk raidz per pool.
	while ((n > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$n ]] && rm -rf /$TESTPOOL$n
		log_must zpool create -f $TESTPOOL$n raidz $1 $2
		shift 2
		((n -= 1))
	done
}
371
372#
373# Destroy the configured testpool mirrors.
374# the mirrors are of the form ${TESTPOOL}{number}
375# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	# The generic cleanup tears down all ${TESTPOOL}{number} pools.
	default_cleanup_noexit
	log_pass
}
382
383#
384# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
385# $1 the list of disks
386#
#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z.
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if ((${#disks[*]} < 2)); then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Use the whole disk list; the old 'raidz $1 $2 $3' silently ignored
	# every disk after the third.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
403
404#
405# Common function used to cleanup storage pools and datasets.
406#
407# Invoked at the start of the test suite to ensure the system
408# is in a known state, and also at the end of each set of
409# sub-tests to ensure errors from one set of tests doesn't
410# impact the execution of the next set.
411
function default_cleanup
{
	# Tear everything down, then record the test as passed.
	default_cleanup_noexit
	log_pass
}
418
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		# Best-effort unmount; failures are ignored on purpose.
		zfs unmount -a > /dev/null 2>&1
		# $KEEP is expanded into an egrep alternation pattern so the
		# pools named there survive the cleanup.
		exclude=`eval echo \"'(${KEEP})'\"`
		# $NO_POOLS filters 'zpool list' noise — presumably the
		# "no pools available" message; TODO confirm against callers.
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		# NOTE(review): destroying a pool may free up others (e.g.
		# pools backed by files on another pool), hence the outer
		# loop re-evaluates until the list drains. ${ALL_POOLS} is
		# unquoted in the test, which relies on pool names
		# containing no whitespace.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Refresh the candidate list after each
				# destroy attempt.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# In a local zone only the delegated $ZONE_POOL/$ZONE_CTR*
		# datasets can be destroyed; take out their children.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is off
				# or unsupported (checksum interacts with
				# encrypted datasets).
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
495
496
497#
498# Common function used to cleanup storage pools, file systems
499# and containers.
500#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount the contained filesystem first if it is mounted.
	if ismounted $TESTPOOL/$TESTCTR/$TESTFS1; then
		log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Destroy the child filesystem, then the container itself.
	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
522
523#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
526#
527# $1 snapshot name
528#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Was "does not existed." — grammatically broken message.
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by get_prop is only meaningful while the
	# snapshot is actually mounted, so capture it up front and only when
	# the snapshot is currently mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
553
554#
555# Common function used to cleanup clone.
556#
557# $1 clone name
558#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Was "does not existed." — grammatically broken message.
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot: capture the mountpoint
	# only while the clone is mounted, before destroying it.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
579
580#
581# Common function used to cleanup bookmark of file system or volume.  Default
582# to delete the file system's bookmark.
583#
584# $1 bookmark name
585#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Was "$bkmarkp" (undefined variable — typo for $bkmark),
		# which produced an empty name in the failure message; also
		# fixes the broken "does not existed." grammar.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
596
597# Return 0 if a snapshot exists; $? otherwise
598#
599# $1 - snapshot name
600
function snapexists
{
	# The function's status is the status of the listing itself.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
606
607#
608# Return 0 if a bookmark exists; $? otherwise
609#
610# $1 - bookmark name
611#
function bkmarkexists
{
	# The function's status is the status of the listing itself.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
617
618#
619# Set a property to a certain value on a dataset.
620# Sets a property of the dataset to the value as passed in.
621# @param:
622#	$1 dataset who's property is being set
623#	$2 property to set
624#	$3 value to set property to
625# @return:
626#	0 if the property could be set.
627#	non-zero otherwise.
628# @use: ZFS
629#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output rv
	output=$(zfs set $2=$3 $1 2>&1)
	rv=$?
	if ((rv == 0)); then
		return 0
	fi

	# Record what failed and why before handing back the status.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
650
651#
652# Assign suite defined dataset properties.
653# This function is used to apply the suite's defined default set of
654# properties to a dataset.
655# @parameters: $1 dataset to use
656# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
657# @returns:
658#   0 if the dataset has been altered.
659#   1 if no pool name was passed in.
660#   2 if the dataset could not be found.
661#   3 if the dataset could not have it's properties set.
662#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	# Confirm the dataset shows up among the words of 'zfs list' output.
	typeset entry
	typeset -i found=0
	for entry in $(zfs list); do
		if [[ $dataset = $entry ]]; then
			found=1
			break
		fi
	done
	((found == 0)) && return 2

	# Apply the suite-wide defaults that are configured, if any.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
690
691#
692# Check a numeric assertion
693# @parameter: $@ the assertion to check
694# @output: big loud notice if assertion failed
695# @use: log_fail
696#
function assert
{
	# Evaluate the arguments arithmetically; fail loudly if untrue.
	if ! (($@)); then
		log_fail "$@"
	fi
}
701
702#
703# Function to format partition size of a disk
704# Given a disk cxtxdx reduces all partitions
705# to 0 size
706#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset slice

	# Slice 2 is skipped: it conventionally maps the whole disk.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $diskname
	done
}
717
718#
719# Given a slice, size and disk, this function
720# formats the slice to the specified size.
721# Size should be specified with units as per
722# the `format` command requirements eg. 100mb 3gb
723#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset format_file=/var/tmp/format_in.$$

	# Build the interactive 'format' command script: enter the
	# partition menu, pick the slice, accept the defaults for id/flags,
	# set start and size, write the label, then quit.
	printf '%s\n' "partition" "$slicenum" "" "" "$start" "$size" \
	    "label" "" "q" "q" > $format_file

	format -e -s -d $disk -f $format_file
	typeset ret_val=$?
	rm -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
752
753#
754# Get the end cyl of the given slice
755#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Reduce any /dev path plus slice suffix to the bare disk name.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, from the backup slice's VTOC.
	typeset -i secs_per_cyl=0
	secs_per_cyl=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((secs_per_cyl == 0)); then
		return
	fi

	# Last sector of the requested slice (column 6 of prtvtoc -h).
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	# Convert the sector count into whole cylinders.
	((endcyl = (endcyl + 1) / secs_per_cyl))
	echo $endcyl
}
783
784
785#
786# Given a size,disk and total slice number,  this function formats the
787# disk slices from 0 to the total slice number with the same specified
788# size.
789#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	for ((i = 0; i < total_slices; i++)); do
		# Slice 2 is the conventional whole-disk slice; skip it.
		((i == 2)) && continue
		set_partition $i "$cyl" $slice_size $disk_name
		# Next slice starts where this one ended.
		cyl=$(get_endslice $disk_name $i)
	done
}
809
810#
811# This function continues to write to a filenum number of files into dirnum
812# number of directories until either file_write returns an error or the
813# maximum number of files per directory have been written.
814#
815# Usage:
816# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
817#
818# Return value: 0 on success
819#		non 0 on error
820#
821# Where :
822#	destdir:    is the directory where everything is to be created under
823#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
824#	filenum:    the maximum number of files per subdirectory
825#	bytes:	    number of bytes to write
826#	num_writes: numer of types to write out bytes
827#	data:	    the data that will be writen
828#
829#	E.g.
830#	file_fs /testdir 20 25 1024 256 0
831#
832# Note: bytes * num_writes equals the size of the testfile
833#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while :; do
		# Stop once the directory quota is reached (-1 = unlimited).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# A write failure (e.g. filesystem full) also ends the run;
		# its status is propagated to the caller.
		if ((retval != 0)); then
			break
		fi
		# Roll over to a fresh subdirectory after filenum files.
		if ((fn >= filenum)); then
			fn=0
			((idirnum += 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn += 1))
		fi
	done
	return $retval
}
871
872#
873# Simple function to get the specified property. If unable to
874# get the property then exits.
875#
876# Note property is in 'parsable' format (-p)
877#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# -p gives the parsable form; the assignment's status is the status
	# of the 'zfs get' itself.
	if ! prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
894
895#
896# Simple function to get the specified property of pool. If unable to
897# get the property then exits.
898#
function get_pool_prop # property pool
{
	typeset prop=$1
	typeset pool=$2
	typeset prop_val

	if ! poolexists $pool ; then
		log_note "Pool $pool not exists."
		return 1
	fi

	# Value is the third column of the last 'zpool get' output line.
	prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
		awk '{print $3}')
	# NOTE(review): this status reflects only the final awk stage of
	# the pipeline, so it is effectively always 0 — kept as-is.
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for pool " \
		"$pool"
		return 1
	fi

	echo $prop_val
	return 0
}
921
922# Return 0 if a pool exists; $? otherwise
923#
924# $1 - pool name
925
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# The function's status is the status of the query itself.
	zpool get name "$pool" > /dev/null 2>&1
}
938
939# Return 0 if all the specified datasets exist; $? otherwise
940#
941# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Every named dataset must exist; bail with the query's status on
	# the first one that does not.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
957
958# return 0 if none of the specified datasets exists, otherwise return 1.
959#
960# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail as soon as any named dataset turns out to exist.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
976
977#
978# Given a mountpoint, or a dataset name, determine if it is shared.
979#
980# Returns 0 if shared, 1 otherwise.
981#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A non-path argument is a dataset name: map it to its mountpoint
	# first. Unmountable datasets can never be shared.
	if [[ $fs != "/"* ]] ; then
		datasetnonexists "$fs" && return 1
		mtpt=$(get_prop mountpoint "$fs")
		case $mtpt in
			none|legacy|-) return 1
				;;
			*)	fs=$mtpt
				;;
		esac
	fi

	# Look for the mountpoint among the active shares.
	for mtpt in `share | awk '{print $2}'` ; do
		[[ $mtpt == $fs ]] && return 0
	done

	# Not shared — surface the NFS server state as a debugging hint.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1014
1015#
1016# Given a mountpoint, determine if it is not shared.
1017#
1018# Returns 0 if not shared, 1 otherwise.
1019#
function not_shared
{
	typeset fs=$1

	# Invert the is_shared predicate.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1031
1032#
1033# Helper function to unshare a mountpoint.
1034#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when it is actually shared; otherwise a no-op.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1046
1047#
1048# Check NFS server status and trigger it online.
1049#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		# NOTE(review): 'timeout' is deliberately not typeset, so it
		# leaks into the caller's scope — presumably harmless here.
		timeout=10
		# Bare 'timeout' inside [[ ... -ne ... ]] is evaluated
		# arithmetically; the '*'"'"-suffix glob detects the SMF
		# transition marker.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The dummy share has served its purpose; clean it up.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1096
1097#
1098# To verify whether calling process is in global zone
1099#
1100# Return 0 if in global zone, 1 in non-global zone
1101#
function is_global_zone
{
	# Success exactly when zonename reports "global"; any error or
	# other zone name yields status 1.
	typeset cur_zone=$(zonename 2>/dev/null)
	[[ $cur_zone == "global" ]]
}
1110
1111#
1112# Verify whether test is permitted to run from
1113# global zone, local zone, or both
1114#
1115# $1 zone limit, could be "global", "local", or "both"(no limit)
1116#
1117# Return 0 if permitted, otherwise exit with log_unsupported
1118#
function verify_runnable # zone limit
{
	typeset limit=$1
	typeset where

	# No restriction means always runnable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		where=global
	else
		where=local
	fi

	case $limit in
		both)
			;;
		global|local)
			# Mismatch between where we are and where the test
			# may run aborts via log_unsupported.
			if [[ $limit != $where ]]; then
				if [[ $where == global ]]; then
					log_unsupported "Test is unable to run from "\
						"global zone."
				else
					log_unsupported "Test is unable to run from "\
						"local zone."
				fi
			fi
			;;
		*)	log_note "Warning: unknown limit $limit - " \
				"use both."
			;;
	esac

	# Local zones always re-export the delegated pool.
	[[ $where == local ]] && reexport_pool

	return 0
}
1153
1154# Return 0 if create successfully or the pool exists; $? otherwise
1155# Note: In local zones, this function should return 0 silently.
1156#
1157# $1 - pool name
1158# $2-n - [keyword] devs_list
1159
function create_pool #pool devs_list
{
	# Strip any dataset components from the first argument.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	poolexists $pool && destroy_pool $pool

	# Only the global zone may create pools; local zones silently
	# succeed.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1182
# Return 0 if the pool is destroyed successfully; 1 if no pool name is given
# or the pool does not exist.
1184# Note: In local zones, this function should return 0 silently.
1185#
1186# $1 - pool name
1187# Destroy pool with the given parameters.
1188
function destroy_pool #pool
{
	# Strip any dataset components from the argument.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before the pool goes away.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			# NOTE(review): 'must' is intentionally left global
			# (no typeset); after 8 failed tries it becomes
			# 'log_must', so the next failure aborts the test.
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				# 'count++' is evaluated arithmetically by
				# [[ ... -ge ... ]].
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			# Remove the leftover mountpoint directory.
			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1228
1229# Return 0 if created successfully; $? otherwise
1230#
1231# $1 - dataset name
1232# $2-n - dataset options
1233
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Start from a clean slate if the dataset already exists.
	datasetexists $dataset && destroy_dataset $dataset

	log_must zfs create $@ $dataset

	return 0
}
1253
# Return 0 if the dataset is destroyed successfully; 1 if no dataset name is
# given or the dataset does not exist.
1255# Note: In local zones, this function should return 0 silently.
1256#
1257# $1 - dataset name
1258
function destroy_dataset #dataset
{
	typeset dataset=$1
	typeset mtpt

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if ! datasetexists "$dataset" ; then
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	# Capture the mountpoint first so the leftover directory can be
	# removed after the recursive destroy.
	mtpt=$(get_prop mountpoint "$dataset")
	log_must zfs destroy -r $dataset
	[[ -d $mtpt ]] && log_must rm -rf $mtpt

	return 0
}
1280
1281#
1282# Firstly, create a pool with 5 datasets. Then, create a single zone and
1283# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1284# and a zvol device to the zone.
1285#
1286# $1 zone name
1287# $2 zone root directory prefix
1288# $3 zone ip
1289#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): zone_ip is accepted but not used anywhere below.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the five containers to the zone as a dataset.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots without interactive
	# system identification prompts.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1393
1394#
1395# Reexport TESTPOOL & TESTPOOL(1-4)
1396#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Reassign TESTPOOL and TESTPOOL1..TESTPOOL4 to the ZONE_POOL
	# containers and mount any that are not currently mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# The numbered TESTPOOL<i> variables are built
			# dynamically, hence the eval indirection.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1417
1418#
1419# Verify a given disk is online or offline
1420#
1421# Return 0 is pool/disk matches expected state, 1 otherwise
1422#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Success when the disk's line in the verbose pool status
	# mentions the expected state (case-insensitive).
	zpool status -v $pool | grep "$disk" | grep -i "$state" > /dev/null 2>&1
}
1434
1435#
1436# Get the mountpoint of snapshot
1437# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1438# as its mountpoint
1439#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs snap

	# A snapshot name must contain '@' with non-empty parts on
	# both sides.
	[[ $dataset == *@* ]] || log_fail "Error name of snapshot '$dataset'."

	fs=${dataset%@*}
	snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1457
1458#
1459# Given a pool and file system, this function will verify the file system
1460# using the zdb internal tool. Note that the pool is exported and imported
1461# to ensure it has consistent state.
1462#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Remaining arguments are directories to search for pool vdevs
	# ('zpool import -d ...').
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import to make sure zdb sees a consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# A non-zero zdb exit status indicates an inconsistency; the
	# captured output is dumped before failing.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1496
1497#
1498# Given a pool, and this function list all disks in the pool
1499#
function get_disklist # pool
{
	typeset list

	# Skip the iostat header (first 4 lines), the dashed separator
	# lines, and the vdev grouping keywords; what remains are the
	# leaf devices.
	list=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	# Unquoted expansion folds the newline-separated names onto
	# a single line.
	echo $list
}
1510
1511# /**
1512#  This function kills a given list of processes after a time period. We use
1513#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1514#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1515#  would be listed as FAIL, which we don't want : we're happy with stress tests
1516#  running for a certain amount of time, then finishing.
1517#
1518# @param $1 the time in seconds after which we should terminate these processes
1519# @param $2..$n the processes we wish to terminate.
1520# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1540
1541#
1542# Verify a given hotspare disk is inuse or avail
1543#
1544# Return 0 is pool/disk matches expected state, 1 otherwise
1545#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_* helpers in this file.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1559
1560#
1561# Wait until a hotspare transitions to a given state or times out.
1562#
1563# Return 0 when  pool/disk matches expected state, 1 on timeout.
1564#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix from the disk name.
	# The previous pattern '$/DEV_DSKDIR/' was a literal string
	# (the '$' preceded the variable name's brace-less expansion),
	# so the prefix was never actually removed.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the desired
	# state or $timeout seconds have elapsed.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1584
1585#
1586# Verify a given slog disk is inuse or avail
1587#
1588# Return 0 is pool/disk matches expected state, 1 otherwise
1589#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_* helpers in this file.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1603
1604#
1605# Verify a given vdev disk is inuse or avail
1606#
1607# Return 0 is pool/disk matches expected state, 1 otherwise
1608#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_* helpers in this file.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == ${cur_state} ]]
}
1622
1623#
1624# Wait until a vdev transitions to a given state or times out.
1625#
1626# Return 0 when  pool/disk matches expected state, 1 on timeout.
1627#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix from the disk name.
	# The previous pattern '$/DEV_DSKDIR/' was a literal string
	# (the '$' preceded the variable name's brace-less expansion),
	# so the prefix was never actually removed.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the desired
	# state or $timeout seconds have elapsed.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1647
1648#
1649# Check the output of 'zpool status -v <pool>',
1650# and to see if the content of <token> contain the <keyword> specified.
1651#
1652# Return 0 is contain, 1 otherwise
1653#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull the status line whose first field is "<token>:", e.g. the
	# "scan:" line.  NB: 'scan' is assigned without typeset here and
	# therefore leaks into the global scope.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive substring match of the keyword in that line.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1670
1671#
1672# These 6 following functions are instance of check_pool_status()
1673#	is_pool_resilvering - to check if the pool is resilver in progress
1674#	is_pool_resilvered - to check if the pool is resilver completed
1675#	is_pool_scrubbing - to check if the pool is scrub in progress
1676#	is_pool_scrubbed - to check if the pool is scrub completed
1677#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1678#	is_pool_scrub_paused - to check if the pool has scrub paused
1679#	is_pool_removing - to check if the pool is removing a vdev
1680#	is_pool_removed - to check if the pool is remove completed
1681#
function is_pool_resilvering #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1687
function is_pool_resilvered #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "resilvered " $2
}
1693
function is_pool_scrubbing #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1699
function is_pool_scrubbed #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1705
function is_pool_scrub_stopped #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1711
function is_pool_scrub_paused #pool <verbose>
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1717
function is_pool_removing #pool
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "remove" "in progress since "
}
1723
function is_pool_removed #pool
{
	# Function status is that of check_pool_status.
	check_pool_status "$1" "remove" "completed on"
}
1729
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	# Poll once a second until the pool health reads DEGRADED or
	# the timeout elapses.
	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1748
1749#
# Use create_pool()/destroy_pool() to clean up the information in
# the given disk to avoid slice overlapping.
1752#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	poolexists $pool && destroy_pool $pool

	# Creating and immediately destroying a scratch pool rewrites
	# the vdev labels, clearing stale state from the devices.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1766
1767#/**
1768# A function to find and locate free disks on a system or from given
1769# disks as the parameter. It works by locating disks that are in use
1770# as swap devices and dump devices, and also disks listed in /etc/vfstab
1771#
1772# $@ given disks to find which are free, default is all disks in
1773# the test system
1774#
1775# @return a string containing the list of available disks
1776#*/
1777function find_disks
1778{
1779	sfi=/tmp/swaplist.$$
1780	dmpi=/tmp/dumpdev.$$
1781	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1782
1783	swap -l > $sfi
1784	dumpadm > $dmpi 2>/dev/null
1785
1786# write an awk script that can process the output of format
1787# to produce a list of disks we know about. Note that we have
1788# to escape "$2" so that the shell doesn't interpret it while
1789# we're creating the awk script.
1790# -------------------
1791	cat > /tmp/find_disks.awk <<EOF
1792#!/bin/nawk -f
1793	BEGIN { FS="."; }
1794
1795	/^Specify disk/{
1796		searchdisks=0;
1797	}
1798
1799	{
1800		if (searchdisks && \$2 !~ "^$"){
1801			split(\$2,arr," ");
1802			print arr[1];
1803		}
1804	}
1805
1806	/^AVAILABLE DISK SELECTIONS:/{
1807		searchdisks=1;
1808	}
1809EOF
1810#---------------------
1811
1812	chmod 755 /tmp/find_disks.awk
1813	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1814	rm /tmp/find_disks.awk
1815
1816	unused=""
1817	for disk in $disks; do
1818	# Check for mounted
1819		grep "${disk}[sp]" /etc/mnttab >/dev/null
1820		(($? == 0)) && continue
1821	# Check for swap
1822		grep "${disk}[sp]" $sfi >/dev/null
1823		(($? == 0)) && continue
1824	# check for dump device
1825		grep "${disk}[sp]" $dmpi >/dev/null
1826		(($? == 0)) && continue
1827	# check to see if this disk hasn't been explicitly excluded
1828	# by a user-set environment variable
1829		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1830		(($? == 0)) && continue
1831		unused_candidates="$unused_candidates $disk"
1832	done
1833	rm $sfi
1834	rm $dmpi
1835
1836# now just check to see if those disks do actually exist
1837# by looking for a device pointing to the first slice in
1838# each case. limit the number to max_finddisksnum
1839	count=0
1840	for disk in $unused_candidates; do
1841		if [ -b /dev/dsk/${disk}s0 ]; then
1842		if [ $count -lt $max_finddisksnum ]; then
1843			unused="$unused $disk"
1844			# do not impose limit if $@ is provided
1845			[[ -z $@ ]] && ((count = count + 1))
1846		fi
1847		fi
1848	done
1849
1850# finally, return our disk list
1851	echo $unused
1852}
1853
1854#
1855# Add specified user to specified group
1856#
1857# $1 group name
1858# $2 user name
1859# $3 base of the homedir (optional)
1860#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	# Create the account with a home directory under $basedir.
	log_must useradd -g $gname -d $basedir/$uname -m $uname

	return 0
}
1875
1876#
1877# Delete the specified user.
1878#
1879# $1 login name
1880# $2 base of the homedir (optional)
1881#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -n $user ]] || log_fail "login name is necessary."

	# Only remove accounts that actually exist.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1899
1900#
1901# Select valid gid and create specified group.
1902#
1903# $1 group name
1904#
function add_group #<group_name>
{
	typeset group=$1
	# Assign 100 as the base gid
	typeset -i gid=100

	[[ -n $group ]] || log_fail "group name is necessary."

	# Walk gids upward from the base until groupadd accepts one.
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# exit 4: the gid is not unique, try the next one
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1926
1927#
1928# Delete the specified group.
1929#
1930# $1 group name
1931#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -n $grp ]] || log_fail "group name is necessary."

	# Renaming the group to itself probes for existence without
	# changing anything; groupmod's exit code tells us the state.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# exit 6: group does not exist — nothing to do
		6) return 0 ;;
		# exit 9: name already exists as a group name — delete it
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1951
1952#
1953# This function will return true if it's safe to destroy the pool passed
1954# as argument 1. It checks for pools based on zvols and files, and also
1955# files contained in a pool that may have a different mountpoint.
1956#
1957function safe_to_destroy_pool { # $1 the pool name
1958
1959	typeset pool=""
1960	typeset DONT_DESTROY=""
1961
1962	# We check that by deleting the $1 pool, we're not
1963	# going to pull the rug out from other pools. Do this
1964	# by looking at all other pools, ensuring that they
1965	# aren't built from files or zvols contained in this pool.
1966
1967	for pool in $(zpool list -H -o name)
1968	do
1969		ALTMOUNTPOOL=""
1970
1971		# this is a list of the top-level directories in each of the
1972		# files that make up the path to the files the pool is based on
1973		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1974			awk '{print $1}')
1975
1976		# this is a list of the zvols that make up the pool
1977		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1978		    | awk '{print $1}')
1979
1980		# also want to determine if it's a file-based pool using an
1981		# alternate mountpoint...
1982		POOL_FILE_DIRS=$(zpool status -v $pool | \
1983					grep / | awk '{print $1}' | \
1984					awk -F/ '{print $2}' | grep -v "dev")
1985
1986		for pooldir in $POOL_FILE_DIRS
1987		do
1988			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
1989					grep "${pooldir}$" | awk '{print $1}')
1990
1991			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1992		done
1993
1994
1995		if [ ! -z "$ZVOLPOOL" ]
1996		then
1997			DONT_DESTROY="true"
1998			log_note "Pool $pool is built from $ZVOLPOOL on $1"
1999		fi
2000
2001		if [ ! -z "$FILEPOOL" ]
2002		then
2003			DONT_DESTROY="true"
2004			log_note "Pool $pool is built from $FILEPOOL on $1"
2005		fi
2006
2007		if [ ! -z "$ALTMOUNTPOOL" ]
2008		then
2009			DONT_DESTROY="true"
2010			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2011		fi
2012	done
2013
2014	if [ -z "${DONT_DESTROY}" ]
2015	then
2016		return 0
2017	else
2018		log_note "Warning: it is not safe to destroy $1!"
2019		return 1
2020	fi
2021}
2022
2023#
2024# Get the available ZFS compression options
2025# $1 option type zfs_set|zfs_compress
2026#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	case $1 in
	zfs_compress)
		COMPRESS_OPTS="on lzjb"
		;;
	zfs_set)
		COMPRESS_OPTS="on off lzjb"
		;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Include the gzip variants only when this zfs build knows them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2045
2046#
2047# Verify zfs operation with -p option work as expected
2048# $1 operation, value could be create, clone or rename
2049# $2 dataset type, value could be fs or vol
2050# $3 dataset name
2051# $4 new dataset name
2052#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly and normalize the operation string
	case $ops in
		create)
			# 'create' takes only the new name; volumes need -V.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2116
2117#
2118# Get configuration of pool
2119# $1 pool name
2120# $2 config name
2121#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot ('-' when
	# unset); it selects which zdb invocation to use below.
	# NOTE(review): presumably 'zdb -e' is needed for pools with an
	# alternate root — confirm.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip surrounding single quotes from the zdb value.  NB: the
	# bare ' inside ${...} is ksh-specific quoting — leave as-is.
	# NB: 'value' is assigned without typeset and is global.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2147
2148#
# Private function. Randomly select one of the items from the arguments.
2150#
2151# $1 count
2152# $2-n string
2153#
function _random_get
{
	typeset cnt=$1
	shift

	typeset items="$@"
	typeset -i idx
	((idx = RANDOM % cnt + 1))

	# Fields are space-separated; print the idx'th one.
	echo "$items" | cut -f $idx -d ' '
}
2166
2167#
# Randomly select one item from the arguments, allowing a NONE (empty) result
2169#
#
# Randomly select one item from the arguments, with one extra slot so
# that an empty (NONE) selection is also possible.
#
function random_get_with_non
{
	typeset -i cnt=$#
	# The selection range is $# + 1; when the extra index is drawn,
	# 'cut' returns an empty field, i.e. the NONE case.
	# (The previous '((cnt =+ 1))' parsed as 'cnt = +1' and always
	# reset cnt to 1, so only the first argument could be picked.)
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2177
2178#
# Randomly select one item from the arguments; NONE is not a possible result
2180#
function random_get
{
	# Pick uniformly among exactly the given arguments.
	_random_get "$#" "$@"
}
2185
2186#
2187# Detect if the current system support slog
2188#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run ('-n') pool creation with a separate log device
	# succeeds only when the running bits support slogs.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
	rm -r $dir

	return $ret
}
2207
2208#
2209# The function will generate a dataset name with specific length
2210# $1, the length of the name
2211# $2, the base string to construct the name
2212#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset name=""

	# Repeat the base string enough times to reach at least $len
	# characters (ceiling division).
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		name+="$basestr"
		((reps -= 1))
	done

	echo $name
}
2234
2235#
2236# Get cksum tuple of dataset
2237# $1 dataset name
2238#
2239# sample zdb output:
2240# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2241# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2242# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2243# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset result

	# Flush dirty data first so zdb sees the latest on-disk state,
	# then pull the cksum=... field from the dataset's header line.
	sync
	result=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
		| awk -F= '{print $7}')
	echo $result
}
2252
2253#
2254# Get cksum of file
2255# #1 file path
2256#
function checksum
{
	typeset sum

	# First field of cksum(1) output is the CRC of the file.
	sum=$(cksum $1 | awk '{print $1}')
	echo $sum
}
2263
2264#
2265# Get the given disk/slice state from the specific field of the pool
2266#
2267function get_device_state #pool disk field("", "spares","logs")
2268{
2269	typeset pool=$1
2270	typeset disk=${2#/dev/dsk/}
2271	typeset field=${3:-$pool}
2272
2273	state=$(zpool status -v "$pool" 2>/dev/null | \
2274		nawk -v device=$disk -v pool=$pool -v field=$field \
2275		'BEGIN {startconfig=0; startfield=0; }
2276		/config:/ {startconfig=1}
2277		(startconfig==1) && ($1==field) {startfield=1; next;}
2278		(startfield==1) && ($1==device) {print $2; exit;}
2279		(startfield==1) &&
2280		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2281	echo $state
2282}
2283
2284
2285#
2286# print the given directory filesystem type
2287#
2288# $1 directory name
2289#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints e.g. "/  : ufs"; the filesystem type is the
	# third field.
	#
	df -n $dir | awk '{print $3}'
}
2304
2305#
2306# Given a disk, label it to VTOC regardless what label was on the disk
2307# $1 disk
2308#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(1M) command script appropriate for the platform,
	# then feed it to format non-interactively below.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2352
2353#
2354# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2356#
function is_zfsroot
{
	# Root is on ZFS when 'df -n /' reports a zfs filesystem type.
	df -n / | grep zfs > /dev/null 2>&1
}
2362
2363#
2364# get the root filesystem name if it's zfsroot system.
2365#
2366# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The dataset mounted at "/" with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Double-check the dataset is actually visible to zfs(1M).
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2382
2383#
2384# get the rootfs's pool name
2385# return:
2386#       rootpool name
2387#
#
# get the rootfs's pool name
# return:
#       rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# The dataset mounted at "/" with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool name is the first component of the dataset
		# name; parameter expansion replaces the old
		# backtick/echo/awk pipeline.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2405
2406#
2407# Check if the given device is physical device
2408#
function is_physical_device #device
{
	typeset dev=${1#/dev/dsk/}
	dev=${dev#/dev/rdsk/}

	# Physical devices look like cXtYdZ or cXdY (hex digits allowed).
	if echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1; then
		return 0
	else
		return 1
	fi
}
2417
2418#
2419# Get the directory path of given device
2420#
function get_device_dir #device
{
	typeset device=$1

	if is_physical_device $device ; then
		echo "/dev/dsk"
	else
		# Strip the trailing path component to get the directory,
		# leaving a bare "/" untouched.
		[[ $device != "/" ]] && device=${device%/*}
		echo $device
	fi
}
2434
2435#
2436# Get the package name
2437#
2438function get_package_name
2439{
2440	typeset dirpath=${1:-$STC_NAME}
2441
2442	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2443}
2444
2445#
2446# Get the word numbers from a string separated by white space
2447#
function get_word_count
{
	typeset str="$1"

	# Unquoted expansion lets wc count whitespace-separated words.
	echo $str | wc -w
}
2452
2453#
# To verify that the required number of disks is given
2455#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Too few disks means the test cannot run at all.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2467
function ds_is_volume
{
	typeset type=$(get_prop type $1)

	[[ $type = "volume" ]]
}
2474
function ds_is_filesystem
{
	typeset type=$(get_prop type $1)

	[[ $type = "filesystem" ]]
}
2481
function ds_is_snapshot
{
	typeset type=$(get_prop type $1)

	[[ $type = "snapshot" ]]
}
2488
2489#
2490# Check if Trusted Extensions are installed and enabled
2491#
function is_te_enabled
{
	# labeld is the Trusted Extensions service; grep's status (and
	# its output, which is deliberately not suppressed) becomes the
	# function's result.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2501
2502# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu

	# psrinfo prints one line per processor.
	ncpu=$(psrinfo | wc -l)
	((ncpu > 1))
}
2507
2508function get_cpu_freq
2509{
2510	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2511}
2512
2513# Run the given command as the user provided.
2514function user_run
2515{
2516	typeset user=$1
2517	shift
2518
2519	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2520	return $?
2521}
2522
2523#
2524# Check if the pool contains the specified vdevs
2525#
2526# $1 pool
2527# $2..n <vdev> ...
2528#
2529# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2530# vdevs is not in the pool, and 2 if pool name is missing.
2531#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Previously the temp file leaked on this early
			# return; remove it before reporting the miss.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2555
# Print the largest of the integer arguments.
#
# BUG FIX: the old code declared "typeset -l", which in ksh93 (and bash)
# marks a *lowercase string* variable, not an integer; -i is what was
# intended.  The useless 'max=$(echo $((...)))' subshell per iteration is
# replaced by a direct arithmetic comparison.
function get_max
{
	typeset -i i max=$1
	shift

	for i in "$@"; do
		((i > max)) && max=$i
	done

	echo $max
}
2567
# Print the smallest of the integer arguments.
#
# BUG FIX: the old code declared "typeset -l", which in ksh93 (and bash)
# marks a *lowercase string* variable, not an integer; -i is what was
# intended.  The useless 'min=$(echo $((...)))' subshell per iteration is
# replaced by a direct arithmetic comparison.
function get_min
{
	typeset -i i min=$1
	shift

	for i in "$@"; do
		((i < min)) && min=$i
	done

	echo $min
}
2579
2580#
2581# Generate a random number between 1 and the argument.
2582#
function random
{
	# Fold the shell's RANDOM generator into the range [1, $1].
	typeset max=$1

	echo $((RANDOM % max + 1))
}
2588
2589# Write data that can be compressed into a directory
2590function write_compressible
2591{
2592	typeset dir=$1
2593	typeset megs=$2
2594	typeset nfiles=${3:-1}
2595	typeset bs=${4:-1024k}
2596	typeset fname=${5:-file}
2597
2598	[[ -d $dir ]] || log_fail "No directory: $dir"
2599
2600	log_must eval "fio \
2601	    --name=job \
2602	    --fallocate=0 \
2603	    --minimal \
2604	    --randrepeat=0 \
2605	    --buffer_compress_percentage=66 \
2606	    --buffer_compress_chunk=4096 \
2607	    --directory=$dir \
2608	    --numjobs=$nfiles \
2609	    --rw=write \
2610	    --bs=$bs \
2611	    --filesize=$megs \
2612	    --filename_format='$fname.\$jobnum' >/dev/null"
2613}
2614
# Print the object (inode) number of the given path, failing the test
# when the path does not exist.
function get_objnum
{
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2624
2625#
2626# Prints the current time in seconds since UNIX Epoch.
2627#
2628function current_epoch
2629{
2630	printf '%(%s)T'
2631}
2632
2633#
2634# Get decimal value of global uint32_t variable using mdb.
2635#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	# /X dumps the 32-bit word at the symbol; piping dot into
	# "::eval .=U" re-prints it as an unsigned decimal so callers get
	# a base-10 number.
	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if [[ $? -ne 0 ]]; then
		# log_fail terminates the test, so the return below is
		# defensive only.
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
2650
2651#
2652# Set global uint32_t variable to a decimal value using mdb.
2653#
# Write a decimal value into a global uint32_t kernel variable via mdb.
# Returns 0 on success, 1 (with a diagnostic on stdout) on failure.
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	# /W stores a 32-bit word; the 0t prefix marks the value as decimal.
	if ! mdb -kw -e "$variable/W 0t$value" > /dev/null; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
2667
2668#
2669# Set global scalar integer variable to a hex value using mdb.
2670# Note: Target should have CTF data loaded.
2671#
# Write a hex value into a global scalar integer kernel variable via mdb,
# using the symbol's CTF type information ("/z").  The target must have
# CTF data loaded.  Returns 0 on success, 1 (with a diagnostic on stdout)
# on failure.
function mdb_ctf_set_int
{
	typeset variable=$1
	typeset value=$2

	if ! mdb -kw -e "$variable/z $value" > /dev/null; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
2685