xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision f52943a93040563107b95bccb9db87d9971ef47d)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright (c) 2017 by Tim Chase. All rights reserved.
27# Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
28# Copyright (c) 2017 Datto Inc.
29# Copyright 2019 Joyent, Inc.
30#
31
32. ${STF_TOOLS}/contrib/include/logapi.shlib
33. ${STF_SUITE}/include/math.shlib
34. ${STF_SUITE}/include/blkdev.shlib
35
36# Determine if this is a Linux test system
37#
38# Return 0 if platform Linux, 1 if otherwise
39
# Determine whether the test system is running Linux.
#
# Return 0 if the platform is GNU/Linux, 1 otherwise.
function is_linux
{
	typeset osname

	osname=$(uname -o)
	[[ $osname == "GNU/Linux" ]]
}
48
49# Determine whether a dataset is mounted
50#
51# $1 dataset name
52# $2 filesystem type; optional - defaulted to zfs
53#
54# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
55
# Determine whether a dataset (or its mountpoint path) is mounted.
#
# $1 dataset name, or an absolute mountpoint path (leading '/')
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if mounted; 1 otherwise (the header above says "2 on error",
# but for ufs/nfs the df exit status is returned as-is on failure —
# NOTE(review): confirm callers only test for 0/non-zero).
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# 'zfs mount' prints "<dataset> <mountpoint>" per
			# line.  A leading '/' means $1 is a mountpoint, so
			# compare against column 2; otherwise compare the
			# dataset name in column 1.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df output has the form "<dir> (<name>): ...".
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# Carve the mount directory and the device/resource
			# name out of the df line; match on either.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
91
92# Return 0 if a dataset is mounted; 1 otherwise
93#
94# $1 dataset name
95# $2 filesystem type; optional - defaulted to zfs
96
# Return 0 if a dataset is mounted; 1 otherwise.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
103
104# Return 0 if a dataset is unmounted; 1 otherwise
105#
106# $1 dataset name
107# $2 filesystem type; optional - defaulted to zfs
108
# Return 0 if a dataset is unmounted; 1 otherwise.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
	typeset rc

	ismounted $1 $2
	rc=$?
	# Only an explicit "not mounted" (1) counts as success here.
	if ((rc == 1)); then
		return 0
	fi
	return 1
}
115
116# split line on ","
117#
118# $1 - line to split
119
# Split a comma-separated line into space-separated words on stdout.
#
# $1 - line to split
function splitline
{
	typeset line=$1

	# Note: $line is deliberately unquoted, matching historical
	# word-splitting behavior of this helper.
	echo $line | sed -e "s/,/ /g"
}
124
125function default_setup
126{
127	default_setup_noexit "$@"
128
129	log_pass
130}
131
132#
133# Given a list of disks, setup storage pools and datasets.
134#
# Given a list of disks, set up storage pools and datasets without
# exiting the test.
#
# $1 disk list used to create $TESTPOOL (global zone only)
# $2 if non-empty, create container $TESTPOOL/$TESTCTR with $TESTFS1
# $3 if non-empty, create volume $TESTPOOL/$TESTVOL
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		# Start from a clean slate: tear down any leftover pool
		# and stale mountpoint directory before re-creating.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Local zones cannot create pools; re-import what the
		# global zone delegated to us.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container itself is canmount=off; only the child
		# filesystem $TESTFS1 is mounted (at $TESTDIR1).
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		# zvols (-V) can only be created in the global zone; in a
		# local zone fall back to a plain filesystem of that name.
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
178
179#
180# Given a list of disks, setup a storage pool, file system and
181# a container.
182#
# Given a list of disks, set up a storage pool, a file system and a
# container, then end the test with log_pass (via default_setup).
#
# $1 - disk list
function default_container_setup
{
	typeset disks=$1

	default_setup "$disks" "true"
}
189
190#
191# Given a list of disks, setup a storage pool,file system
192# and a volume.
193#
# Given a list of disks, set up a storage pool, a file system and a
# volume, then end the test with log_pass (via default_setup).
#
# $1 - disk list
function default_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "" "true"
}
200
201#
202# Given a list of disks, setup a storage pool,file system,
203# a container and a volume.
204#
# Given a list of disks, set up a storage pool, a file system, a
# container and a volume, then end the test with log_pass.
#
# $1 - disk list
function default_container_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "true" "true"
}
211
212#
213# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
214# filesystem
215#
216# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
217# $2 snapshot name. Default, $TESTSNAP
218#
# Create a snapshot of a filesystem or volume.  By default, snapshot
# $TESTPOOL/$TESTFS.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
function create_snapshot
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	if [[ -z $dataset ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	snapexists $dataset@$snapname && \
		log_fail "$dataset@$snapname already exists."
	if ! datasetexists $dataset; then
		log_fail "$dataset must exist."
	fi

	log_must zfs snapshot $dataset@$snapname
}
235
236#
237# Create a clone from a snapshot, default clone name is $TESTCLONE.
238#
239# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
240# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
241#
# Create a clone from a snapshot.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
function create_clone   # snapshot clone
{
	typeset snapshot=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset target=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $snapshot ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $target ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snapshot $target
}
254
255#
256# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
257# filesystem.
258#
259# $1 Existing filesystem or volume name. Default, $TESTFS
260# $2 Existing snapshot name. Default, $TESTSNAP
261# $3 bookmark name. Default, $TESTBKMARK
262#
# Create a bookmark of the given snapshot.  By default, bookmark
# $TESTFS@$TESTSNAP as $TESTFS#$TESTBKMARK.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
function create_bookmark
{
	typeset dataset=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset mark=${3:-$TESTBKMARK}

	if [[ -z $dataset ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi
	if [[ -z $mark ]]; then
		log_fail "Bookmark's name is undefined."
	fi

	bkmarkexists $dataset#$mark && \
		log_fail "$dataset#$mark already exists."
	if ! datasetexists $dataset; then
		log_fail "$dataset must exist."
	fi
	if ! snapexists $dataset@$snapname; then
		log_fail "$dataset@$snapname must exist."
	fi

	log_must zfs bookmark $dataset@$snapname $dataset#$mark
}
283
284#
285# Create a temporary clone result of an interrupted resumable 'zfs receive'
286# $1 Destination filesystem name. Must not exist, will be created as the result
287#    of this function along with its %recv temporary clone
288# $2 Source filesystem name. Must not exist, will be created and destroyed
289#
# Create a temporary clone resulting from an interrupted resumable
# 'zfs receive'.
#
# $1 Destination filesystem name. Must not exist; it is created (with
#    its %recv temporary clone) as the result of this function.
# $2 Source filesystem name. Must not exist; created then destroyed.
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Seed the destination with a full receive of the first snapshot.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10k so the resumable (-s)
	# receive fails partway, leaving the %recv clone behind.
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# A successful interruption leaves $recvfs/%recv inconsistent.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
318
319function default_mirror_setup
320{
321	default_mirror_setup_noexit $1 $2 $3
322
323	log_pass
324}
325
326#
327# Given a pair of disks, set up a storage pool and dataset for the mirror
328# @parameters: $1 the primary side of the mirror
329#   $2 the secondary side of the mirror
330# @uses: ZPOOL ZFS TESTPOOL TESTFS
331function default_mirror_setup_noexit
332{
333	readonly func="default_mirror_setup_noexit"
334	typeset primary=$1
335	typeset secondary=$2
336
337	[[ -z $primary ]] && \
338		log_fail "$func: No parameters passed"
339	[[ -z $secondary ]] && \
340		log_fail "$func: No secondary partition passed"
341	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
342	log_must zpool create -f $TESTPOOL mirror $@
343	log_must zfs create $TESTPOOL/$TESTFS
344	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
345}
346
347#
348# create a number of mirrors.
349# We create a number($1) of 2 way mirrors using the pairs of disks named
350# on the command line. These mirrors are *not* mounted
351# @parameters: $1 the number of mirrors to create
352#  $... the devices to use to create the mirrors on
353# @uses: ZPOOL ZFS TESTPOOL
# Create a number of 2-way mirror pools, named ${TESTPOOL}<n> counting
# down from $1 to 1, using consecutive pairs of the devices given on
# the command line.  The pools are *not* mounted.
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i count=$1

	shift
	while ((count > 0)); do
		# Each iteration consumes one pair of devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /${TESTPOOL}${count} ]] && rm -rf /${TESTPOOL}${count}
		log_must zpool create -f ${TESTPOOL}${count} mirror $1 $2
		shift 2
		((count = count - 1))
	done
}
367
368#
369# create a number of raidz pools.
370# We create a number($1) of 2 raidz pools  using the pairs of disks named
371# on the command line. These pools are *not* mounted
372# @parameters: $1 the number of pools to create
373#  $... the devices to use to create the pools on
374# @uses: ZPOOL ZFS TESTPOOL
# Create a number of 2-disk raidz pools, named ${TESTPOOL}<n> counting
# down from $1 to 1, using consecutive pairs of the devices given on
# the command line.  The pools are *not* mounted.
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i count=$1

	shift
	while ((count > 0)); do
		# Each iteration consumes one pair of devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /${TESTPOOL}${count} ]] && rm -rf /${TESTPOOL}${count}
		log_must zpool create -f ${TESTPOOL}${count} raidz $1 $2
		shift 2
		((count = count - 1))
	done
}
388
389#
390# Destroy the configured testpool mirrors.
391# the mirrors are of the form ${TESTPOOL}{number}
392# @uses: ZPOOL ZFS TESTPOOL
393function destroy_mirrors
394{
395	default_cleanup_noexit
396
397	log_pass
398}
399
400#
401# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
402# $1 the list of disks
403#
# Given a minimum of two disks, set up a storage pool and dataset for
# a raid-z, then end the test with log_pass.
#
# $1-n the list of disks
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Pass the entire disk list to zpool.  The previous code passed
	# only "$1 $2 $3", silently ignoring any disks beyond the third
	# even though the whole list had been validated above.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
420
421#
422# Common function used to cleanup storage pools and datasets.
423#
424# Invoked at the start of the test suite to ensure the system
425# is in a known state, and also at the end of each set of
426# sub-tests to ensure errors from one set of tests doesn't
427# impact the execution of the next set.
428
429function default_cleanup
430{
431	default_cleanup_noexit
432
433	log_pass
434}
435
# Destroy all destroyable test pools (global zone) or reset the
# delegated $ZONE_POOL datasets (local zone), then remove $TESTDIR.
# Pools matching $NO_POOLS or the $KEEP list are preserved.
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep alternation "(poolA|poolB)" from $KEEP.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.  The list is re-read after every destroy because
		# destroying one pool may make another destroyable.
		# NOTE(review): ${ALL_POOLS} is unquoted in the test
		# below; with more than one pool this relies on
		# ksh/test accepting the multi-word expansion — verify.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# Local zone: destroy everything below the delegated
		# $ZONE_POOL/$ZONE_CTR[01234] containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only touch it when
				# encryption is off or unreadable.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
512
513
514#
515# Common function used to cleanup storage pools, file systems
516# and containers.
517#
# Clean up the container datasets created by default_setup ($TESTCTR,
# $TESTFS1, $TESTDIR1), then run the full default_cleanup (which ends
# the test with log_pass).
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount the child filesystem first so the destroys below
	# don't trip over an active mount.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
539
540#
541# Common function used to cleanup snapshot of file system or volume. Default to
542# delete the file system's snapshot
543#
544# $1 snapshot name
545#
# Destroy a snapshot and remove its (now stale) mountpoint directory.
#
# $1 snapshot name; defaults to $TESTPOOL/$TESTFS@$TESTSNAP
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# get_prop may not report the real mountpoint once the snapshot
	# is unmounted, so capture the mountpoint only while the
	# snapshot is still mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the leftover mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
570
571#
572# Common function used to cleanup clone.
573#
574# $1 clone name
575#
# Destroy a clone and remove its (now stale) mountpoint directory.
#
# $1 clone name; defaults to $TESTPOOL/$TESTCLONE
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not existed."
	fi

	# As in destroy_snapshot: capture the mountpoint only while the
	# clone is still mounted, since get_prop may not report the real
	# mountpoint afterwards.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the leftover mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
596
597#
598# Common function used to cleanup bookmark of file system or volume.  Default
599# to delete the file system's bookmark.
600#
601# $1 bookmark name
602#
# Destroy a bookmark of a file system or volume.  Defaults to the
# file system's bookmark $TESTPOOL/$TESTFS#$TESTBKMARK.
#
# $1 bookmark name
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously expanded the undefined
		# variable $bkmarkp, printing an empty bookmark name.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
613
614# Return 0 if a snapshot exists; $? otherwise
615#
616# $1 - snapshot name
617
# Return 0 if a snapshot exists; non-zero otherwise.
#
# $1 - snapshot name
function snapexists
{
	# The function's status is that of the zfs list itself.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
623
624#
625# Return 0 if a bookmark exists; $? otherwise
626#
627# $1 - bookmark name
628#
# Return 0 if a bookmark exists; non-zero otherwise.
#
# $1 - bookmark name
function bkmarkexists
{
	# The function's status is that of the zfs list itself.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
634
635#
636# Set a property to a certain value on a dataset.
637# Sets a property of the dataset to the value as passed in.
638# @param:
639#	$1 dataset who's property is being set
640#	$2 property to set
641#	$3 value to set property to
642# @return:
643#	0 if the property could be set.
644#	non-zero otherwise.
645# @use: ZFS
646#
# Set a property to a certain value on a dataset.
# @param:
#	$1 dataset whose property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set; non-zero otherwise (the zfs
#	exit status, with diagnostics via log_note).
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output rv
	output=$(zfs set $2=$3 $1 2>&1)
	rv=$?
	((rv == 0)) && return 0

	# Failed: report what we tried and what zfs said.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
667
668#
669# Assign suite defined dataset properties.
670# This function is used to apply the suite's defined default set of
671# properties to a dataset.
672# @parameters: $1 dataset to use
673# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
674# @returns:
675#   0 if the dataset has been altered.
676#   1 if no pool name was passed in.
677#   2 if the dataset could not be found.
678#   3 if the dataset could not have it's properties set.
679#
# Apply the suite's default properties ($COMPRESSION_PROP,
# $CHECKSUM_PROP, when set) to a dataset.
# @parameters: $1 dataset to use
# @returns:
#   0 if the dataset has been altered.
#   1 if no dataset name was passed in.
#   2 if the dataset could not be found.
#   3 if the dataset could not have its properties set.
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): this iterates over every whitespace-separated
	# word of the full 'zfs list' output (headers and all), relying
	# on the dataset name appearing as a standalone word — confirm;
	# 'zfs list -H -o name' would be the precise form.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
707
708#
709# Check a numeric assertion
710# @parameter: $@ the assertion to check
711# @output: big loud notice if assertion failed
712# @use: log_fail
713#
# Check a numeric assertion; fail the test loudly if it does not hold.
# @parameter: $@ the arithmetic expression to check
# @use: log_fail
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
718
719#
720# Function to format partition size of a disk
721# Given a disk cxtxdx reduces all partitions
722# to 0 size
723#
# Reduce all partitions of a disk (cxtxdx) to 0 size.
#
# $1 - whole disk name
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset slice

	# Slice 2 (the whole-disk slice) is deliberately skipped.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $diskname
	done
}
734
735#
736# Given a slice, size and disk, this function
737# formats the slice to the specified size.
738# Size should be specified with units as per
739# the `format` command requirements eg. 100mb 3gb
740#
741function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
742{
743	typeset -i slicenum=$1
744	typeset start=$2
745	typeset size=$3
746	typeset disk=$4
747	[[ -z $slicenum || -z $size || -z $disk ]] && \
748	    log_fail "The slice, size or disk name is unspecified."
749	typeset format_file=/var/tmp/format_in.$$
750
751	echo "partition" >$format_file
752	echo "$slicenum" >> $format_file
753	echo "" >> $format_file
754	echo "" >> $format_file
755	echo "$start" >> $format_file
756	echo "$size" >> $format_file
757	echo "label" >> $format_file
758	echo "" >> $format_file
759	echo "q" >> $format_file
760	echo "q" >> $format_file
761
762	format -e -s -d $disk -f $format_file
763	typeset ret_val=$?
764	rm -f $format_file
765	[[ $ret_val -ne 0 ]] && \
766	    log_fail "Unable to format $disk slice $slicenum to $size"
767	return 0
768}
769
770#
771# Get the end cyl of the given slice
772#
# Print the end cylinder of the given slice on stdout.  Prints nothing
# if the sectors/cylinder ratio cannot be determined.
#
# $1 - disk name (with or without /dev/(r)dsk/ prefix or slice suffix)
# $2 - slice number
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to a bare cxtxdx name: strip any /dev path prefix
	# and any trailing slice ("s<n>") component.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, from the backup slice's VTOC.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# Column 6 of prtvtoc -h is the slice's last sector.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	# Convert last sector to (exclusive) end cylinder.
	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
800
801
802#
803# Given a size,disk and total slice number,  this function formats the
804# disk slices from 0 to the total slice number with the same specified
805# size.
806#
# Format slices 0 .. total_slices-1 of a disk to the same given size,
# skipping the whole-disk (backup) slice 2.
#
# $1 - slice size (with units)
# $2 - whole disk name
# $3 - total number of slices
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset size=$1
	typeset disk=$2
	typeset nslices=$3
	typeset -i slice
	typeset start_cyl

	zero_partitions $disk
	for ((slice = 0; slice < nslices; slice++)); do
		# Slice 2 is the whole-disk (backup) slice; skip it.
		((slice == 2)) && continue
		# start_cyl is empty on the first pass, letting format
		# pick the default start; afterwards each slice begins
		# where the previous one ended.
		set_partition $slice "$start_cyl" $size $disk
		start_cyl=$(get_endslice $disk $slice)
	done
}
826
827#
828# This function continues to write to a filenum number of files into dirnum
829# number of directories until either file_write returns an error or the
830# maximum number of files per directory have been written.
831#
832# Usage:
833# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
834#
835# Return value: 0 on success
836#		non 0 on error
837#
838# Where :
839#	destdir:    is the directory where everything is to be created under
840#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
841#	filenum:    the maximum number of files per subdirectory
842#	bytes:	    number of bytes to write
843#	num_writes: numer of types to write out bytes
844#	data:	    the data that will be writen
845#
846#	E.g.
847#	file_fs /testdir 20 25 1024 256 0
848#
849# Note: bytes * num_writes equals the size of the testfile
850#
# Write filenum files into each of up to dirnum subdirectories of
# destdir until either file_write returns an error or the directory
# limit is reached.  bytes * num_writes is the size of each test file.
#
#	destdir:    directory to create everything under ($TESTDIR)
#	dirnum:	    maximum number of subdirectories, -1 for no limit
#	filenum:    maximum number of files per subdirectory
#	bytes:	    number of bytes per write
#	num_writes: number of writes per file
#	data:	    the byte value to write
#
# Return value: 0 on success, file_write's status on error.
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-0}

	# odirnum acts as the outer-loop "keep going" flag; idirnum is
	# the current subdirectory index, fn the current file index.
	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory quota is reached (dirnum < 0
		# means unlimited, so this test never fires).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write failure (e.g. ENOSPC) ends the fill.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Roll over to a fresh subdirectory after filenum files.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
888
889#
890# Simple function to get the specified property. If unable to
891# get the property then exits.
892#
893# Note property is in 'parsable' format (-p)
894#
# Print the value of the specified dataset property on stdout, in
# parsable (-p) format.  Returns 1 (with a log_note) if the property
# cannot be read.
#
# $1 - property name
# $2 - dataset name
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
911
912#
913# Simple function to get the specified property of pool. If unable to
914# get the property then exits.
915#
# Print the value of the specified pool property on stdout.  Returns 1
# (with a log_note) if the pool does not exist.
#
# $1 - property name
# $2 - pool name
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# tail -1 skips the header line; column 3 is VALUE.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? here is the status of the awk at the
		# end of the pipeline, not of zpool, so this branch is
		# effectively unreachable — confirm before relying on it.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
938
939# Return 0 if a pool exists; $? otherwise
940#
941# $1 - pool name
942
# Return 0 if a pool exists; non-zero otherwise.
#
# $1 - pool name
function poolexists
{
	typeset pool=$1

	[[ -n $pool ]] || {
		log_note "No pool name given."
		return 1
	}

	# The function's status is that of the zpool query itself.
	zpool get name "$pool" > /dev/null 2>&1
}
955
956# Return 0 if all the specified datasets exist; $? otherwise
957#
958# $1-n  dataset name
# Return 0 if all the specified datasets exist; non-zero otherwise.
#
# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Bail out with zfs's status on the first miss.
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
974
975# return 0 if none of the specified datasets exists, otherwise return 1.
976#
977# $1-n  dataset name
# Return 0 if none of the specified datasets exists; 1 otherwise.
#
# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Any hit means at least one dataset exists.
		if zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1; then
			return 1
		fi
	done

	return 0
}
993
994#
995# Given a mountpoint, or a dataset name, determine if it is shared.
996#
997# Returns 0 if shared, 1 otherwise.
998#
999function is_shared
1000{
1001	typeset fs=$1
1002	typeset mtpt
1003
1004	if [[ $fs != "/"* ]] ; then
1005		if datasetnonexists "$fs" ; then
1006			return 1
1007		else
1008			mtpt=$(get_prop mountpoint "$fs")
1009			case $mtpt in
1010				none|legacy|-) return 1
1011					;;
1012				*)	fs=$mtpt
1013					;;
1014			esac
1015		fi
1016	fi
1017
1018	for mtpt in `share | awk '{print $2}'` ; do
1019		if [[ $mtpt == $fs ]] ; then
1020			return 0
1021		fi
1022	done
1023
1024	typeset stat=$(svcs -H -o STA nfs/server:default)
1025	if [[ $stat != "ON" ]]; then
1026		log_note "Current nfs/server status: $stat"
1027	fi
1028
1029	return 1
1030}
1031
1032#
1033# Given a mountpoint, determine if it is not shared.
1034#
1035# Returns 0 if not shared, 1 otherwise.
1036#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
function not_shared
{
	typeset fs=$1

	# Simply invert is_shared's verdict.
	! is_shared $fs
}
1048
1049#
1050# Helper function to unshare a mountpoint.
1051#
# Helper function to unshare a mountpoint (a no-op if it is not
# currently shared).  Always returns 0.
#
# $1 - mountpoint or dataset name
function unshare_fs #fs
{
	typeset fs=$1

	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1063
1064#
1065# Check NFS server status and trigger it online.
1066#
# Check NFS server status and trigger it online by performing (and
# then undoing) a dummy share.  A no-op in a local zone.
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Poll until the '*' transition marker disappears, up to
		# 10 more seconds.  (Inside [[ ]], 'timeout' is evaluated
		# arithmetically, so no '$' is needed with -ne.)
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1113
1114#
1115# To verify whether calling process is in global zone
1116#
1117# Return 0 if in global zone, 1 in non-global zone
1118#
# Verify whether the calling process is in the global zone.
#
# Return 0 if in the global zone, 1 otherwise (including when
# zonename(1) is unavailable).
function is_global_zone
{
	typeset zone
	zone=$(zonename 2>/dev/null)
	[[ $zone == "global" ]]
}
1127
1128#
1129# Verify whether test is permitted to run from
1130# global zone, local zone, or both
1131#
1132# $1 zone limit, could be "global", "local", or "both"(no limit)
1133#
1134# Return 0 if permitted, otherwise exit with log_unsupported
1135#
# Verify whether the test is permitted to run from the global zone,
# a local zone, or both.
#
# $1 zone limit: "global", "local", or "both" (no limit); an empty
#    limit permits everything.
#
# Return 0 if permitted; otherwise exit via log_unsupported.  Unknown
# limits are treated as "both" with a warning.
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zones additionally need their delegated pool
		# re-imported before the test proper runs.
		reexport_pool
	fi

	return 0
}
1170
1171# Return 0 if create successfully or the pool exists; $? otherwise
1172# Note: In local zones, this function should return 0 silently.
1173#
1174# $1 - pool name
1175# $2-n - [keyword] devs_list
1176
# Create a pool from the given devices, destroying any pre-existing
# pool of the same name first.  In a local zone this is a silent
# no-op (beyond the destroy).  Return 0 on success, 1 if no pool
# name was given.
#
# $1 - pool name (any "/dataset" suffix is stripped)
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Clear out any previous incarnation of this pool.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1199
1200# Return 0 if destroy successfully or the pool exists; $? otherwise
1201# Note: In local zones, this function should return 0 silently.
1202#
1203# $1 - pool name
1204# Destroy pool with the given parameters.
1205
# Destroy the given pool (global zone only; silently returns 0 in a
# local zone), retrying on transient failures, and remove its stale
# mountpoint directory.  Returns 1 if no name was given or the pool
# does not exist.
#
# $1 - pool name (any "/dataset" suffix is stripped)
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			# Note: 'must' is deliberately not typeset; after 8
			# failed attempts it becomes "log_must", so the next
			# failure aborts the test instead of looping forever.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1245
1246# Return 0 if created successfully; $? otherwise
1247#
1248# $1 - dataset name
1249# $2-n - dataset options
1250
# Create a dataset with the given options, destroying any
# pre-existing dataset of the same name first.  Return 0 on success,
# 1 if no dataset name was given.
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
	typeset ds=$1

	shift

	if [[ -z $ds ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Clear out any previous incarnation of this dataset.
	datasetexists $ds && destroy_dataset $ds

	log_must zfs create $@ $ds

	return 0
}
1270
1271# Return 0 if destroy successfully or the dataset exists; $? otherwise
1272# Note: In local zones, this function should return 0 silently.
1273#
1274# $1 - dataset name
1275
# Recursively destroy the given dataset and remove its stale
# mountpoint directory.  Return 0 on success, 1 if no name was given
# or the dataset does not exist.
#
# $1 - dataset name
function destroy_dataset #dataset
{
	typeset ds=$1
	typeset mtpt

	if [[ -z $ds ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if ! datasetexists "$ds" ; then
		log_note "Dataset does not exist. ($ds)"
		return 1
	fi

	# Capture the mountpoint before the dataset is gone.
	mtpt=$(get_prop mountpoint "$ds")
	log_must zfs destroy -r $ds
	[[ -d $mtpt ]] && log_must rm -rf $mtpt

	return 0
}
1297
1298#
1299# Firstly, create a pool with 5 datasets. Then, create a single zone and
1300# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1301# and a zvol device to the zone.
1302#
1303# $1 zone name
1304# $2 zone root directory prefix
1305# $3 zone ip
1306#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Reset the zone's directory; create it 0700 as zoneadm requires.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 containers to the zone as a dataset.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so first boot is non-interactive
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1410
1411#
1412# Reexport TESTPOOL & TESTPOOL(1-4)
1413#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			# Container 0 becomes the primary TESTPOOL.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# Containers 1-4 become TESTPOOL1..TESTPOOL4; eval is
			# needed both to assign and to dereference the
			# dynamically-named variables.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1434
1435#
1436# Verify a given disk is online or offline
1437#
1438# Return 0 is pool/disk matches expected state, 1 otherwise
1439#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Succeed iff the status line for the disk mentions the state
	# (case-insensitive); the pipeline status is the return value.
	zpool status -v $pool | grep "$disk" | grep -i "$state" \
	    > /dev/null 2>&1
}
1451
1452#
1453# Get the mountpoint of snapshot
1454# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1455# as its mountpoint
1456#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# A snapshot name must contain '@' with non-empty text on both
	# sides of it.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1474
1475#
1476# Given a device and 'ashift' value verify it's correctly set on every label
1477#
1478function verify_ashift # device ashift
1479{
1480	typeset device="$1"
1481	typeset ashift="$2"
1482
1483	zdb -e -lll $device | nawk -v ashift=$ashift '/ashift: / {
1484	    if (ashift != $2)
1485	        exit 1;
1486	    else
1487	        count++;
1488	    } END {
1489	    if (count != 4)
1490	        exit 1;
1491	    else
1492	        exit 0;
1493	    }'
1494
1495	return $?
1496}
1497
1498#
1499# Given a pool and file system, this function will verify the file system
1500# using the zdb internal tool. Note that the pool is exported and imported
1501# to ensure it has consistent state.
1502#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Remaining arguments are directories to search for vdev files
	# on re-import.
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import cycle guarantees zdb sees a consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1536
1537#
1538# Given a pool issue a scrub and verify that no checksum errors are reported.
1539#
# Given a pool, issue a scrub and verify that no checksum errors are
# reported.
#
# $1 - pool name (defaults to $TESTPOOL)
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}
	typeset cksum

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# The line after the CKSUM header carries the pool-wide error
	# count in its last column; non-zero means corruption was found.
	cksum=$(zpool status $pool | \
	    awk '{if ($5 == "CKSUM"){L=1; next} if (L) {print $NF;L=0}}')
	if [[ $cksum != 0 ]]; then
		# Show status for the pool under test; the previous code
		# omitted the pool argument and dumped every pool.
		log_must zpool status -v $pool
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
1554
1555#
1556# Given a pool, and this function list all disks in the pool
1557#
1558function get_disklist # pool
1559{
1560	typeset disklist=""
1561
1562	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1563	    grep -v "\-\-\-\-\-" | \
1564	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1565
1566	echo $disklist
1567}
1568
1569# /**
1570#  This function kills a given list of processes after a time period. We use
1571#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1572#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1573#  would be listed as FAIL, which we don't want : we're happy with stress tests
1574#  running for a certain amount of time, then finishing.
1575#
1576# @param $1 the time in seconds after which we should terminate these processes
1577# @param $2..$n the processes we wish to terminate.
1578# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still running.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1598
1599#
1600# Verify a given hotspare disk is inuse or avail
1601#
1602# Return 0 is pool/disk matches expected state, 1 otherwise
1603#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state intentionally left non-local, matching the other
	# check_* helpers.
	cur_state=$(get_device_state $pool $disk "spares")

	# Exit status of the comparison is the return value.
	[[ $state == $cur_state ]]
}
1617
1618#
1619# Wait until a hotspare transitions to a given state or times out.
1620#
1621# Return 0 when  pool/disk matches expected state, 1 on timeout.
1622#
# Wait until a hotspare transitions to a given state or times out.
#
# $1 - pool name
# $2 - disk (a /dev/dsk/ prefix is stripped)
# $3 - expected state
# $4 - timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Fix: the original pattern '${2#$/DEV_DSKDIR/}' contained a typo
	# ('$/' is literal text, not an expansion) and never stripped
	# anything; strip /dev/dsk/ as check_hotspare_state does.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1642
1643#
1644# Verify a given slog disk is inuse or avail
1645#
1646# Return 0 is pool/disk matches expected state, 1 otherwise
1647#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state intentionally left non-local, matching the other
	# check_* helpers.
	cur_state=$(get_device_state $pool $disk "logs")

	# Exit status of the comparison is the return value.
	[[ $state == $cur_state ]]
}
1661
1662#
1663# Verify a given vdev disk is inuse or avail
1664#
1665# Return 0 is pool/disk matches expected state, 1 otherwise
1666#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state intentionally left non-local, matching the other
	# check_* helpers.  No field argument: search the pool section.
	cur_state=$(get_device_state $pool $disk)

	# Exit status of the comparison is the return value.
	[[ $state == $cur_state ]]
}
1680
1681#
1682# Wait until a vdev transitions to a given state or times out.
1683#
1684# Return 0 when  pool/disk matches expected state, 1 on timeout.
1685#
# Wait until a vdev transitions to a given state or times out.
#
# $1 - pool name
# $2 - disk (a /dev/dsk/ prefix is stripped)
# $3 - expected state
# $4 - timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Fix: the original pattern '${2#$/DEV_DSKDIR/}' contained a typo
	# ('$/' is literal text, not an expansion) and never stripped
	# anything; strip /dev/dsk/ as check_vdev_state does.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1705
1706#
1707# Check the output of 'zpool status -v <pool>',
1708# and to see if the content of <token> contain the <keyword> specified.
1709#
1710# Return 0 is contain, 1 otherwise
1711#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Extract the status line whose first word is "<token>:"
	# (e.g. "scan:").  scan is intentionally not typeset (global).
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Succeed iff the line contains the keyword, case-insensitively.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1728
1729#
1730# These 6 following functions are instance of check_pool_status()
1731#	is_pool_resilvering - to check if the pool is resilver in progress
1732#	is_pool_resilvered - to check if the pool is resilver completed
1733#	is_pool_scrubbing - to check if the pool is scrub in progress
1734#	is_pool_scrubbed - to check if the pool is scrub completed
1735#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1736#	is_pool_scrub_paused - to check if the pool has scrub paused
1737#	is_pool_removing - to check if the pool is removing a vdev
1738#	is_pool_removed - to check if the pool is remove completed
1739#
function is_pool_resilvering #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1745
function is_pool_resilvered #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "resilvered " $2
}
1751
function is_pool_scrubbing #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1757
function is_pool_scrubbed #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1763
function is_pool_scrub_stopped #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1769
function is_pool_scrub_paused #pool <verbose>
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1775
function is_pool_removing #pool
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "remove" "in progress since "
}
1781
function is_pool_removed #pool
{
	# Thin wrapper; the check's exit status is returned implicitly.
	check_pool_status "$1" "remove" "completed on"
}
1787
# Wait (up to $2 seconds, default 30) for pool $1 to report DEGRADED.
# Returns 0 once degraded, 1 on timeout.
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1806
1807#
1808# Wait for a pool to be scrubbed
1809#
1810# $1 pool name
1811# $2 number of seconds to wait (optional)
1812#
1813# Returns true when pool has been scrubbed, or false if there's a timeout or if
1814# no scrub was done.
1815#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once per second until the scrub reports completion.
	until is_pool_scrubbed $pool; do
		log_must sleep 1
	done
}
1824
1825#
1826# Use create_pool()/destroy_pool() to clean up the infomation in
1827# in the given disk to avoid slice overlapping.
1828#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Creating and destroying a scratch pool rewrites the labels on
	# the given devices, clearing any stale slice configuration.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1842
1843#/**
1844# A function to find and locate free disks on a system or from given
1845# disks as the parameter. It works by locating disks that are in use
1846# as swap devices and dump devices, and also disks listed in /etc/vfstab
1847#
1848# $@ given disks to find which are free, default is all disks in
1849# the test system
1850#
1851# @return a string containing the list of available disks
1852#*/
1853function find_disks
1854{
1855	sfi=/tmp/swaplist.$$
1856	dmpi=/tmp/dumpdev.$$
1857	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1858
1859	swap -l > $sfi
1860	dumpadm > $dmpi 2>/dev/null
1861
1862# write an awk script that can process the output of format
1863# to produce a list of disks we know about. Note that we have
1864# to escape "$2" so that the shell doesn't interpret it while
1865# we're creating the awk script.
1866# -------------------
1867	cat > /tmp/find_disks.awk <<EOF
1868#!/bin/nawk -f
1869	BEGIN { FS="."; }
1870
1871	/^Specify disk/{
1872		searchdisks=0;
1873	}
1874
1875	{
1876		if (searchdisks && \$2 !~ "^$"){
1877			split(\$2,arr," ");
1878			print arr[1];
1879		}
1880	}
1881
1882	/^AVAILABLE DISK SELECTIONS:/{
1883		searchdisks=1;
1884	}
1885EOF
1886#---------------------
1887
1888	chmod 755 /tmp/find_disks.awk
1889	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1890	rm /tmp/find_disks.awk
1891
1892	unused=""
1893	for disk in $disks; do
1894	# Check for mounted
1895		grep "${disk}[sp]" /etc/mnttab >/dev/null
1896		(($? == 0)) && continue
1897	# Check for swap
1898		grep "${disk}[sp]" $sfi >/dev/null
1899		(($? == 0)) && continue
1900	# check for dump device
1901		grep "${disk}[sp]" $dmpi >/dev/null
1902		(($? == 0)) && continue
1903	# check to see if this disk hasn't been explicitly excluded
1904	# by a user-set environment variable
1905		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1906		(($? == 0)) && continue
1907		unused_candidates="$unused_candidates $disk"
1908	done
1909	rm $sfi
1910	rm $dmpi
1911
1912# now just check to see if those disks do actually exist
1913# by looking for a device pointing to the first slice in
1914# each case. limit the number to max_finddisksnum
1915	count=0
1916	for disk in $unused_candidates; do
1917		if [ -b /dev/dsk/${disk}s0 ]; then
1918		if [ $count -lt $max_finddisksnum ]; then
1919			unused="$unused $disk"
1920			# do not impose limit if $@ is provided
1921			[[ -z $@ ]] && ((count = count + 1))
1922		fi
1923		fi
1924	done
1925
1926# finally, return our disk list
1927	echo $unused
1928}
1929
1930#
1931# Add specified user to specified group
1932#
1933# $1 group name
1934# $2 user name
1935# $3 base of the homedir (optional)
1936#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Mark the new account as having no password.
	log_must passwd -N $uname

	return 0
}
1952
1953#
1954# Delete the specified user.
1955#
1956# $1 login name
1957# $2 base of the homedir (optional)
1958#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only remove accounts that actually exist.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory regardless.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1976
1977#
1978# Select valid gid and create specified group.
1979#
1980# $1 group name
1981#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid
	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# groupadd exit code 4: the gid is not unique;
			# try the next one.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2003
2004#
2005# Delete the specified group.
2006#
2007# $1 group name
2008#
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	# Probe for existence by "renaming" the group to its own name;
	# groupmod's exit code tells us whether it exists.
	groupmod -n $grp $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
2028
2029#
2030# This function will return true if it's safe to destroy the pool passed
2031# as argument 1. It checks for pools based on zvols and files, and also
2032# files contained in a pool that may have a different mountpoint.
2033#
2034function safe_to_destroy_pool { # $1 the pool name
2035
2036	typeset pool=""
2037	typeset DONT_DESTROY=""
2038
2039	# We check that by deleting the $1 pool, we're not
2040	# going to pull the rug out from other pools. Do this
2041	# by looking at all other pools, ensuring that they
2042	# aren't built from files or zvols contained in this pool.
2043
2044	for pool in $(zpool list -H -o name)
2045	do
2046		ALTMOUNTPOOL=""
2047
2048		# this is a list of the top-level directories in each of the
2049		# files that make up the path to the files the pool is based on
2050		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2051			awk '{print $1}')
2052
2053		# this is a list of the zvols that make up the pool
2054		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
2055		    | awk '{print $1}')
2056
2057		# also want to determine if it's a file-based pool using an
2058		# alternate mountpoint...
2059		POOL_FILE_DIRS=$(zpool status -v $pool | \
2060					grep / | awk '{print $1}' | \
2061					awk -F/ '{print $2}' | grep -v "dev")
2062
2063		for pooldir in $POOL_FILE_DIRS
2064		do
2065			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2066					grep "${pooldir}$" | awk '{print $1}')
2067
2068			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2069		done
2070
2071
2072		if [ ! -z "$ZVOLPOOL" ]
2073		then
2074			DONT_DESTROY="true"
2075			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2076		fi
2077
2078		if [ ! -z "$FILEPOOL" ]
2079		then
2080			DONT_DESTROY="true"
2081			log_note "Pool $pool is built from $FILEPOOL on $1"
2082		fi
2083
2084		if [ ! -z "$ALTMOUNTPOOL" ]
2085		then
2086			DONT_DESTROY="true"
2087			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2088		fi
2089	done
2090
2091	if [ -z "${DONT_DESTROY}" ]
2092	then
2093		return 0
2094	else
2095		log_note "Warning: it is not safe to destroy $1!"
2096		return 1
2097	fi
2098}
2099
2100#
2101# Get the available ZFS compression options
2102# $1 option type zfs_set|zfs_compress
2103#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	case $1 in
	zfs_compress)
		COMPRESS_OPTS="on lzjb"
		;;
	zfs_set)
		COMPRESS_OPTS="on off lzjb"
		;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this zfs build knows them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2122
2123#
2124# Verify zfs operation with -p option work as expected
2125# $1 operation, value could be create, clone or rename
2126# $2 dataset type, value could be fs or vol
2127# $3 dataset name
2128# $4 new dataset name
2129#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only a target; shift it into place
			# and add the volume size when creating a zvol.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# clone requires a snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# rename source must not be a snapshot here.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2193
2194#
2195# Get configuration of pool
2196# $1 pool name
2197# $2 config name
2198#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Last column of 'zpool list -H' is the altroot ('-' if none);
	# an exported/altroot pool must be read with 'zdb -e'.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip surrounding single quotes from the zdb value.
	# NOTE(review): the unescaped quote inside ${value#'} is
	# ksh-specific quoting — confirm before porting to another shell.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2224
2225#
2226# Privated function. Random select one of items from arguments.
2227#
2228# $1 count
2229# $2-n string
2230#
function _random_get
{
	typeset -i count=$1
	shift

	typeset items="$@"
	typeset -i idx
	# Pick a 1-based field index uniformly from 1..count.
	((idx = RANDOM % count + 1))

	echo "$items" | cut -f $idx -d ' '
}
2243
2244#
2245# Random select one of item from arguments which include NONE string
2246#
# Randomly select one item from the arguments, allowing an extra empty
# "NONE" slot beyond the last argument to be chosen.
function random_get_with_non
{
	typeset -i cnt=$#
	# Fix: the original '((cnt =+ 1))' assigned +1 to cnt instead of
	# incrementing it, so the selection range was always 1.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2254
2255#
2256# Random select one of item from arguments which doesn't include NONE string
2257#
function random_get
{
	# Delegate directly; the field count equals the argument count.
	_random_get "$#" "$@"
}
2262
2263#
2264# Detect if the current system support slog
2265#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# 'zpool create -n' dry-runs the configuration; it fails when
	# slog devices are unsupported.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1

	rm -r $dir
	return $ret
}
2284
2285#
2286# The function will generate a dataset name with specific length
2287# $1, the length of the name
2288# $2, the base string to construct the name
2289#
function gen_dataset_name
{
	typeset -i target_len=$1
	typeset base="$2"
	typeset -i chunk_len=${#base}
	typeset -i reps
	typeset name=""

	# Repeat the base string enough times to reach at least
	# target_len characters (round the count up).
	((reps = target_len / chunk_len))
	((target_len % chunk_len != 0)) && ((reps += 1))

	while ((reps > 0)); do
		name="${name}${base}"
		((reps -= 1))
	done

	echo $name
}
2311
2312#
2313# Get cksum tuple of dataset
2314# $1 dataset name
2315#
2316# sample zdb output:
2317# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2318# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2319# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2320# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2321function datasetcksum
2322{
2323	typeset cksum
2324	sync
2325	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2326		| awk -F= '{print $7}')
2327	echo $cksum
2328}
2329
2330#
2331# Get cksum of file
2332# #1 file path
2333#
function checksum
{
	typeset sum
	# First field of cksum(1) output is the CRC of the file.
	sum=$(cksum $1 | awk '{print $1}')
	echo $sum
}
2340
2341#
2342# Get the given disk/slice state from the specific field of the pool
2343#
2344function get_device_state #pool disk field("", "spares","logs")
2345{
2346	typeset pool=$1
2347	typeset disk=${2#/dev/dsk/}
2348	typeset field=${3:-$pool}
2349
2350	state=$(zpool status -v "$pool" 2>/dev/null | \
2351		nawk -v device=$disk -v pool=$pool -v field=$field \
2352		'BEGIN {startconfig=0; startfield=0; }
2353		/config:/ {startconfig=1}
2354		(startconfig==1) && ($1==field) {startfield=1; next;}
2355		(startfield==1) && ($1==device) {print $2; exit;}
2356		(startfield==1) &&
2357		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2358	echo $state
2359}
2360
2361
2362#
2363# print the given directory filesystem type
2364#
2365# $1 directory name
2366#
function get_fstype
{
	typeset dir=$1

	[[ -z $dir ]] && log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints "<mountpoint> : <fstype>"; the type is the
	# third field, e.g.:
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2381
2382#
2383# Given a disk, label it to VTOC regardless what label was on the disk
2384# $1 disk
2385#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(1M) command script appropriate for the platform;
	# x86 additionally needs an fdisk partition first.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2429
2430#
2431# check if the system was installed as zfsroot or not
2432# return: 0 ture, otherwise false
2433#
function is_zfsroot
{
	# Root is on ZFS iff 'df -n /' reports a zfs filesystem type;
	# the pipeline status is returned implicitly.
	df -n / | grep zfs > /dev/null 2>&1
}
2439
2440#
2441# get the root filesystem name if it's zfsroot system.
2442#
2443# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The dataset mounted on "/" with type zfs, per /etc/mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi

	# Double-check the name is a real ZFS dataset.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2459
2460#
2461# get the rootfs's pool name
2462# return:
2463#       rootpool name
2464#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# The dataset mounted on "/" with type zfs, per /etc/mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is everything before the first '/'.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2482
2483#
2484# Check if the given device is physical device
2485#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# A physical device name looks like cXtXdX (ctd form); the grep
	# status is returned implicitly.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2494
2495#
2496# Get the directory path of given device
2497#
# Get the directory path of the given device.
#
# $1 - device name or path
#
# Physical (ctd) devices live in /dev/dsk; for anything else, emit the
# directory component of the given path.
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly; the original wrapped it in a
	# command substitution ('if ! $(...)'), which only worked via the
	# empty-command exit-status rule and forked a needless subshell.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2511
2512#
2513# Get the package name
2514#
2515function get_package_name
2516{
2517	typeset dirpath=${1:-$STC_NAME}
2518
2519	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2520}
2521
2522#
2523# Get the word numbers from a string separated by white space
2524#
function get_word_count
{
	# Count whitespace-separated words; the argument is deliberately
	# left unquoted so normal word-splitting applies.
	echo $1 | wc -w
}
2529
2530#
2531# To verify if the require numbers of disks is given
2532#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Enough disks: nothing to do.
	((count >= min)) && return

	log_untested "A minimum of $min disks is required to run." \
		" You specified $count disk(s)"
}
2544
# Return 0 if the given dataset's type property is "volume"; 1 otherwise.
function ds_is_volume
{
	[[ "$(get_prop type $1)" == "volume" ]]
}
2551
# Return 0 if the given dataset's type property is "filesystem"; 1 otherwise.
function ds_is_filesystem
{
	[[ "$(get_prop type $1)" == "filesystem" ]]
}
2558
# Return 0 if the given dataset's type property is "snapshot"; 1 otherwise.
function ds_is_snapshot
{
	[[ "$(get_prop type $1)" == "snapshot" ]]
}
2565
2566#
2567# Check if Trusted Extensions are installed and enabled
2568#
2569function is_te_enabled
2570{
2571	svcs -H -o state labeld 2>/dev/null | grep "enabled"
2572	if (($? != 0)); then
2573		return 1
2574	else
2575		return 0
2576	fi
2577}
2578
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu

	ncpu=$(psrinfo | wc -l)
	((ncpu > 1))
}
2584
2585function get_cpu_freq
2586{
2587	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2588}
2589
2590# Run the given command as the user provided.
2591function user_run
2592{
2593	typeset user=$1
2594	shift
2595
2596	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2597	return $?
2598}
2599
2600#
2601# Check if the pool contains the specified vdevs
2602#
2603# $1 pool
2604# $2..n <vdev> ...
2605#
2606# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2607# vdevs is not in the pool, and 2 if pool name is missing.
2608#
2609function vdevs_in_pool
2610{
2611	typeset pool=$1
2612	typeset vdev
2613
2614        if [[ -z $pool ]]; then
2615                log_note "Missing pool name."
2616                return 2
2617        fi
2618
2619	shift
2620
2621	typeset tmpfile=$(mktemp)
2622	zpool list -Hv "$pool" >$tmpfile
2623	for vdev in $@; do
2624		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2625		[[ $? -ne 0 ]] && return 1
2626	done
2627
2628	rm -f $tmpfile
2629
2630	return 0;
2631}
2632
#
# Print the largest of the integer arguments.
#
# $1..n integer values
#
function get_max
{
	# Use -i (integer): ksh93's -l means "long integer" but bash's -l
	# means "lowercase", so -i is the portable integer declaration.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		if ((i > max)); then
			max=$i
		fi
	done

	echo $max
}
2644
#
# Print the smallest of the integer arguments.
#
# $1..n integer values
#
function get_min
{
	# Use -i (integer): ksh93's -l means "long integer" but bash's -l
	# means "lowercase", so -i is the portable integer declaration.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		if ((i < min)); then
			min=$i
		fi
	done

	echo $min
}
2656
2657#
2658# Generate a random number between 1 and the argument.
2659#
2660function random
2661{
2662        typeset max=$1
2663        echo $(( ($RANDOM % $max) + 1 ))
2664}
2665
2666# Write data that can be compressed into a directory
2667function write_compressible
2668{
2669	typeset dir=$1
2670	typeset megs=$2
2671	typeset nfiles=${3:-1}
2672	typeset bs=${4:-1024k}
2673	typeset fname=${5:-file}
2674
2675	[[ -d $dir ]] || log_fail "No directory: $dir"
2676
2677	log_must eval "fio \
2678	    --name=job \
2679	    --fallocate=0 \
2680	    --minimal \
2681	    --randrepeat=0 \
2682	    --buffer_compress_percentage=66 \
2683	    --buffer_compress_chunk=4096 \
2684	    --directory=$dir \
2685	    --numjobs=$nfiles \
2686	    --rw=write \
2687	    --bs=$bs \
2688	    --filesize=$megs \
2689	    --filename_format='$fname.\$jobnum' >/dev/null"
2690}
2691
# Print the object (inode) number of the given path.
# Fails the test if the path does not exist.
function get_objnum
{
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2701
2702#
2703# Sync data to the pool
2704#
2705# $1 pool name
2706# $2 boolean to force uberblock (and config including zpool cache file) update
2707#
2708function sync_pool #pool <force>
2709{
2710	typeset pool=${1:-$TESTPOOL}
2711	typeset force=${2:-false}
2712
2713	if [[ $force == true ]]; then
2714		log_must zpool sync -f $pool
2715	else
2716		log_must zpool sync $pool
2717	fi
2718
2719	return 0
2720}
2721
2722#
2723# Prints the current time in seconds since UNIX Epoch.
2724#
2725function current_epoch
2726{
2727	printf '%(%s)T'
2728}
2729
2730#
2731# Get decimal value of global uint32_t variable using mdb.
2732#
2733function mdb_get_uint32
2734{
2735	typeset variable=$1
2736	typeset value
2737
2738	value=$(mdb -k -e "$variable/X | ::eval .=U")
2739	if [[ $? -ne 0 ]]; then
2740		log_fail "Failed to get value of '$variable' from mdb."
2741		return 1
2742	fi
2743
2744	echo $value
2745	return 0
2746}
2747
2748#
2749# Wait for every device replace operation to complete
2750#
2751# $1 pool name
2752#
2753function wait_replacing #pool
2754{
2755	typeset pool=${1:-$TESTPOOL}
2756	while true; do
2757		[[ "" == "$(zpool status $pool |
2758		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
2759		log_must sleep 1
2760	done
2761}
2762
2763#
2764# Set global uint32_t variable to a decimal value using mdb.
2765#
2766function mdb_set_uint32
2767{
2768	typeset variable=$1
2769	typeset value=$2
2770
2771	mdb -kw -e "$variable/W 0t$value" > /dev/null
2772	if [[ $? -ne 0 ]]; then
2773		echo "Failed to set '$variable' to '$value' in mdb."
2774		return 1
2775	fi
2776
2777	return 0
2778}
2779
2780#
2781# Set global scalar integer variable to a hex value using mdb.
2782# Note: Target should have CTF data loaded.
2783#
2784function mdb_ctf_set_int
2785{
2786	typeset variable=$1
2787	typeset value=$2
2788
2789	mdb -kw -e "$variable/z $value" > /dev/null
2790	if [[ $? -ne 0 ]]; then
2791		echo "Failed to set '$variable' to '$value' in mdb."
2792		return 1
2793	fi
2794
2795	return 0
2796}
2797
2798#
2799# Set a global system tunable (64-bit value)
2800#
2801# $1 tunable name
2802# $2 tunable values
2803#
2804function set_tunable64
2805{
2806	set_tunable_impl "$1" "$2" Z
2807}
2808
2809#
2810# Set a global system tunable (32-bit value)
2811#
2812# $1 tunable name
2813# $2 tunable values
2814#
2815function set_tunable32
2816{
2817	set_tunable_impl "$1" "$2" W
2818}
2819
# Common implementation for set_tunable32/set_tunable64.
#
# $1 tunable name
# $2 value to set
# $3 mdb write format character (W = 32-bit, Z = 64-bit)
# $4 module name (default "zfs"; only honored on Linux)
#
# Returns 0 on success, non-zero on bad arguments or write failure.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		return $?
		;;
	SunOS)
		# String comparison: the original used -eq, which compares
		# arithmetically (both strings evaluate to 0) and therefore
		# always succeeded regardless of $module.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return $?
		;;
	esac
}
2845
2846#
2847# Get a global system tunable
2848#
2849# $1 tunable name
2850#
2851function get_tunable
2852{
2853	get_tunable_impl "$1"
2854}
2855
# Common implementation for get_tunable.
#
# $1 tunable name
# $2 module name (default "zfs"; only honored on Linux)
#
# Prints the tunable's value on stdout; returns non-zero on bad
# arguments, a missing tunable, or an unsupported platform.
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"
	typeset value

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return $?
		;;
	SunOS)
		# Separate declaration from assignment: "typeset v=$(cmd)"
		# can mask cmd's exit status with typeset's own, so the
		# failure check below would never fire.
		value=$(mdb -k -e "$tunable/X | ::eval .=U")
		if [[ $? -ne 0 ]]; then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2883
2884#
2885# Compute SHA256 digest for given file or stdin if no file given.
2886# Note: file path must not contain spaces
2887#
2888function sha256digest
2889{
2890        typeset file=$1
2891
2892	if [ -x /usr/bin/digest ]; then
2893		/usr/bin/digest -a sha256 $file
2894	elif [ -x /usr/bin/sha256sum ]; then
2895		/usr/bin/sha256sum -b $file | awk '{ print $1 }'
2896	else
2897		echo "Cannot calculate SHA256 digest"
2898		return 1
2899	fi
2900	return 0
2901}
2902