#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright 2019 Joyent, Inc.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/refreserv/refreserv.cfg

#
# DESCRIPTION:
#	raidz refreservation=auto picks worst raidz vdev
#
# STRATEGY:
#	1. Create a pool with a single raidz vdev
#	2. For each block size [512b, 1k, 128k] or [4k, 8k, 128k]
#	    - create a volume
#	    - remember its refreservation
#	    - destroy the volume
#	3. Destroy the pool
#	4. Recreate the pool with one more disk in the vdev, then repeat steps
#	   2 and 3.
#
# NOTES:
#	1. This test will use up to 14 disks but can cover the key concepts with
#	   5 disks.
#	2. If the disks are a mixture of 4Kn and 512n/512e, failures are likely.
#
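
#
# For background (illustrative only; nothing below is executed by this test):
# given a pool built from two raidz vdevs of different widths, e.g.
#
#	zpool create tank raidz1 d0 d1 raidz1 d2 d3 d4
#	zfs create -V 100m -o volblocksize=512 tank/vol
#	zfs get -Hpo value refreservation tank/vol
#
# the reservation must stay safe even if every block of the volume lands on
# the less space-efficient vdev, so the reported refreservation should match
# the larger of the values obtained from each vdev alone.  The pool name
# "tank" and disks d0..d4 are hypothetical.
#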
verify_runnable "global"

typeset -a alldisks=($DISKS)

# The larger the volsize, the better zvol_volsize_to_reservation() is at
# guessing the right number - though it is horrible with tiny blocks.  At 10M on
# ashift=12, the estimate may be over 26% too high.
volsize=100
function cleanup
{
	default_cleanup_noexit
	default_setup_noexit "${alldisks[0]}"
}

log_assert "raidz refreservation=auto picks worst raidz vdev"
log_onexit cleanup

poolexists "$TESTPOOL" && log_must zpool destroy "$TESTPOOL"

# Testing tiny block sizes on ashift=12 pools causes so much size inflation
# that small test disks may fill before creating small volumes.  However,
# testing 512b and 1K blocks on ashift=9 pools is an ok approximation for
# testing the problems that arise from 4K and 8K blocks on ashift=12 pools.
bps=$(prtvtoc /dev/rdsk/${alldisks[0]} |
    awk '$NF == "bytes/sector" { print $2; exit 0 }')
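# (Illustrative: prtvtoc reports the sector size in its "Dimensions" header,
# in a line of the form "*     512 bytes/sector"; the awk above extracts the
# second field of that line.)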
case "$bps" in
512)
	allshifts=(9 10 17)
	;;
4096)
	allshifts=(12 13 17)
	;;
*)
	log_fail "bytes/sector != (512|4096)"
	;;
esac
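# Each shift is log2 of a volblocksize to exercise: 2^9=512 and 2^10=1K on
# 512-byte-sector disks, 2^12=4K and 2^13=8K on 4Kn disks, plus 2^17=128K
# either way (see STRATEGY above).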
log_note "Testing in ashift=${allshifts[0]} mode"

typeset -A sizes=

#
# Determine the refreservation for a $volsize MiB volume on each raidz type at
# various block sizes.
#
for parity in 1 2 3; do
	raid=raidz$parity
	typeset -A sizes["$raid"]

	# Ensure we hit scenarios with and without skip blocks
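	# (raidz rounds each allocation up to a multiple of nparity + 1
	# sectors, padding with skip sectors when needed; whether padding
	# occurs depends on the vdev width, hence testing both 2p and
	# 2p + 1 disks.)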
	for ndisks in $((parity * 2)) $((parity * 2 + 1)); do
		typeset -a disks=(${alldisks[0..$((ndisks - 1))]})

		if (( ${#disks[@]} < ndisks )); then
			log_note "Too few disks to test $raid-$ndisks"
			continue
		fi

		typeset -A sizes["$raid"]["$ndisks"]

		log_must zpool create "$TESTPOOL" "$raid" "${disks[@]}"

		for bits in "${allshifts[@]}"; do
			vbs=$((1 << bits))
			log_note "Gathering refreservation for $raid-$ndisks" \
			    "volblocksize=$vbs"

			vol=$TESTPOOL/$TESTVOL
			log_must zfs create -V ${volsize}m \
			    -o volblocksize=$vbs "$vol"

			refres=$(zfs get -Hpo value refreservation "$vol")
			log_must test -n "$refres"
			sizes["$raid"]["$ndisks"]["$vbs"]=$refres

			log_must zfs destroy "$vol"
		done

		log_must zpool destroy "$TESTPOOL"
	done
done
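
# At this point sizes is a nested associative array indexed as
# sizes[raid][ndisks][volblocksize], e.g. (hypothetical entry)
# sizes[raidz1][2][512]=<refreservation in bytes>.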

# A little extra info is always helpful when diagnosing problems.  To
# pretty-print what you find in the log, do this in ksh:
#   typeset -A sizes=(...)
#   print -v sizes
log_note "sizes=$(print -C sizes)"

#
# Helper function for checking that refreservation is calculated properly in
# multi-vdev pools.  "Properly" is defined as assuming that all vdevs are as
# space inefficient as the worst one.
#
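# Equivalently: the refreservation reported on the two-vdev pool should equal
# the larger of the two single-vdev values recorded in sizes above.
#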
function check_vdevs {
	typeset raid=$1
	typeset nd1=$2
	typeset nd2=$3
	typeset -a disks1 disks2
	typeset vbs vol refres refres1 refres2 expect

	disks1=(${alldisks[0..$((nd1 - 1))]})
	disks2=(${alldisks[$nd1..$((nd1 + nd2 - 1))]})
	if (( ${#disks2[@]} < nd2 )); then
		log_note "Too few disks to test $raid-$nd1 + $raid-$nd2"
		return
	fi

	log_must zpool create -f "$TESTPOOL" \
	    "$raid" "${disks1[@]}" "$raid" "${disks2[@]}"

	for bits in "${allshifts[@]}"; do
		vbs=$((1 << bits))
		log_note "Verifying $raid-$nd1 $raid-$nd2 volblocksize=$vbs"

		vol=$TESTPOOL/$TESTVOL
		log_must zfs create -V ${volsize}m -o volblocksize=$vbs "$vol"
		refres=$(zfs get -Hpo value refreservation "$vol")
		log_must test -n "$refres"

		refres1=${sizes["$raid"]["$nd1"]["$vbs"]}
		refres2=${sizes["$raid"]["$nd2"]["$vbs"]}

		if (( refres1 > refres2 )); then
			log_note "Expecting refres ($refres) to match refres" \
			   "from $raid-$nd1 ($refres1)"
			log_must test "$refres" -eq "$refres1"
		else
			log_note "Expecting refres ($refres) to match refres" \
			   "from $raid-$nd2 ($refres2)"
			log_must test "$refres" -eq "$refres2"
		fi

		log_must zfs destroy "$vol"
	done

	log_must zpool destroy "$TESTPOOL"
}
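
# For example (hypothetical invocation), "check_vdevs raidz1 2 3" builds a
# pool from a 2-disk and a 3-disk raidz1 vdev and verifies the volume's
# refreservation against the worse (larger) of the two recorded values.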

#
# Verify that multi-vdev pools use the least optimistic size for all the
# permutations within a particular raidz variant.
#
for raid in "${!sizes[@]}"; do
	# ksh likes to create a [0] item for us.  Thanks, ksh!
	[[ $raid == "0" ]] && continue

	for nd1 in "${!sizes["$raid"][@]}"; do
		[[ $nd1 == "0" ]] && continue

		for nd2 in "${!sizes["$raid"][@]}"; do
			[[ $nd2 == "0" ]] && continue

			check_vdevs "$raid" "$nd1" "$nd2"
		done
	done
done

log_pass "raidz refreservation=auto picks worst raidz vdev"