xref: /illumos-gate/usr/src/uts/common/fs/zfs/space_map.c (revision e153cda9f9660e385e8f468253f80e59f5d454d7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes).  This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
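
/*
 * Illustrative sketch (editorial addition, guarded by a hypothetical
 * SM_EXAMPLES macro so it is compiled out by default): how a caller
 * might use the predicates above to decide how many words an entry
 * occupies before decoding it. The same logic drives the cursor
 * advancement in space_map_iterate() below.
 */
#ifdef SM_EXAMPLES
static int
sm_entry_word_count(uint64_t e)
{
	if (sm_entry_is_debug(e) || sm_entry_is_single_word(e))
		return (1);	/* debug and single-word entries: one word */
	ASSERT(sm_entry_is_double_word(e));
	return (2);		/* SM2 entries span two consecutive words */
}
#endif	/* SM_EXAMPLES */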

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t sm_len = space_map_length(sm);
	ASSERT3U(sm->sm_blksz, !=, 0);

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, sm_len,
	    ZIO_PRIORITY_SYNC_READ);

	uint64_t blksz = sm->sm_blksz;
	int error = 0;
	for (uint64_t block_base = 0; block_base < sm_len && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(sm_len - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) /* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/*
				 * Move on to the second word; the writer
				 * never lets a two-word entry straddle a
				 * block, so check the bound before reading.
				 */
				block_cursor++;
				VERIFY3P(block_cursor, <, block_end);
				e = *block_cursor;

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
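
/*
 * Illustrative sketch (editorial addition, compiled out under a
 * hypothetical SM_EXAMPLES macro): a minimal sm_cb_t callback that
 * tallies the bytes covered by ALLOC entries, and a driver for it.
 * Both function names are made up; the signatures are the ones
 * space_map_iterate() expects above.
 */
#ifdef SM_EXAMPLES
static int
sm_sum_alloc_cb(space_map_entry_t *sme, void *arg)
{
	uint64_t *sum = arg;

	if (sme->sme_type == SM_ALLOC)
		*sum += sme->sme_run;
	return (0);	/* returning non-zero would stop the iteration */
}

static uint64_t
sm_sum_alloc(space_map_t *sm)
{
	uint64_t sum = 0;

	VERIFY0(space_map_iterate(sm, sm_sum_alloc_cb, &sum));
	return (sum);
}
#endif	/* SM_EXAMPLES */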

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_objsize - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
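
/*
 * Worked example (editorial addition): suppose the last block holds the
 * four words [S0, D1a, D1b, S2], where the S words are single-word
 * entries and D1a/D1b are the two words of one double-word entry.
 * Walking i forward while filling buf backwards yields
 * [S2, D1a, D1b, S0]: the entries come out in reverse order, but the
 * two words of the double-word entry keep their original relative
 * order, so the caller can still decode the buffer front-to-back.
 */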

/*
 * Note: This function performs destructive actions: specifically,
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy its entries incrementally starting from
	 * the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				ASSERT3U(i, <, nwords);
				e = buf[i];

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_objsize -= words * sizeof (uint64_t);
			space_map_update(sm);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
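
/*
 * Illustrative sketch (editorial addition, compiled out under a
 * hypothetical SM_EXAMPLES macro): a made-up caller that incrementally
 * destroys a space map while accumulating its ALLOC segments into an
 * in-core range tree. The callback signature is the sm_cb_t used
 * above; everything named sm_reclaim_* is hypothetical.
 */
#ifdef SM_EXAMPLES
static int
sm_reclaim_cb(space_map_entry_t *sme, void *arg)
{
	range_tree_t *rt = arg;

	if (sme->sme_type == SM_ALLOC)
		range_tree_add(rt, sme->sme_offset, sme->sme_run);
	return (0);
}

static void
sm_reclaim(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	/* the caller must hold the locks protecting this space map */
	VERIFY0(space_map_incremental_destroy(sm, sm_reclaim_cb, rt, tx));
}
#endif	/* SM_EXAMPLES */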

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
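
/*
 * Worked example (editorial addition): consider a space map covering
 * sm_size = 100 units with space_map_allocated() = 30. Loading with
 * SM_ALLOC starts from an empty tree, adds ALLOC segments and removes
 * FREE ones, so the tree must end up with exactly 30 units. Loading
 * with SM_FREE starts from the full [sm_start, sm_start + sm_size)
 * range and subtracts ALLOC segments, so the expected total is
 * sm_size - allocated = 70 units; that is what the space accounting
 * in space_map_load() above verifies.
 */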

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 = 80 entries in the 1TB bucket.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
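
/*
 * Worked example (editorial addition): with sm_shift = 9, range tree
 * bucket i = 9 (512-byte segments) maps to space map bucket idx = 0,
 * i = 10 to idx = 1, and so on up to i = 40 mapping to the last bucket
 * idx = 31. Beyond that, idx stays pinned at 31, so bucket i = 44
 * contributes rt_histogram[44] << (44 - 31 - 9) = rt_histogram[44] * 16;
 * each 16TB segment is counted as sixteen 1TB segments.
 */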

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_objsize,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_objsize += sizeof (dentry);
}
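
/*
 * Illustrative sketch (editorial addition, compiled out under a
 * hypothetical SM_EXAMPLES macro): a debug word built as above can be
 * unpacked with the SM_DEBUG_*_DECODE macros that pair with the
 * encoders in sys/space_map.h; the function name is made up.
 */
#ifdef SM_EXAMPLES
static void
sm_print_debug_entry(uint64_t e)
{
	ASSERT(sm_entry_is_debug(e));
	zfs_dbgmsg("debug entry: action %llu, syncpass %llu, txg %llu",
	    (u_longlong_t)SM_DEBUG_ACTION_DECODE(e),
	    (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(e),
	    (u_longlong_t)SM_DEBUG_TXG_DECODE(e));
}
#endif	/* SM_EXAMPLES */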

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * If this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_objsize;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_objsize += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_objsize += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
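
/*
 * Worked example (editorial addition): a segment whose shifted size
 * exceeds run_max is emitted as several entries. For instance, if
 * size = 2 * run_max + 7, the loop above writes three entries with
 * runs of run_max, run_max and 7, advancing start by each run so the
 * entries tile the original segment exactly.
 */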

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_objsize;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_objsize;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *	or the vdev_id is set (meaning not equal to
		 *	SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_objsize);
#endif
}

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
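
/*
 * Illustrative sketch (editorial addition, compiled out under a
 * hypothetical SM_EXAMPLES macro): how syncing code might persist a
 * range tree of frees. The wrapper name is made up; the arguments are
 * exactly those of space_map_write() above, and the tx must belong to
 * the current syncing context.
 */
#ifdef SM_EXAMPLES
static void
sm_sync_frees(space_map_t *sm, range_tree_t *frees, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	space_map_write(sm, frees, SM_FREE, SM_NO_VDEVID, tx);
}
#endif	/* SM_EXAMPLES */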

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
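
/*
 * Illustrative sketch (editorial addition, compiled out under a
 * hypothetical SM_EXAMPLES macro): the typical lifecycle of a space
 * map handle. The object number, range and shift would normally come
 * from the consumer (e.g. a metaslab); the values below are
 * placeholders, and sm_sum_alloc() is the sketch defined earlier.
 */
#ifdef SM_EXAMPLES
static int
sm_open_and_sum(objset_t *os, uint64_t smobj, uint64_t *sump)
{
	space_map_t *sm = NULL;
	int error;

	/* placeholder range: 1GB region, 512-byte (1 << 9) alignment */
	error = space_map_open(&sm, os, smobj, 0, 1ULL << 30, 9);
	if (error != 0)
		return (error);

	*sump = sm_sum_alloc(sm);
	space_map_close(sm);
	return (0);
}
#endif	/* SM_EXAMPLES */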

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, make a worst-case estimate of how much space
 * the tree's segments would take if they were written to the given
 * space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the
	 * number of segments in [2^i, (2^(i+1))-1] of that range_tree.
	 * Given that, we want to divide the buckets into groups: buckets
	 * that can be represented using a single-word entry, ones that
	 * can be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries enabled, but it can easily be applied when
	 * the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * bucket higher than that is represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
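
/*
 * Worked example (editorial addition), assuming SM_RUN_BITS is 15 as
 * defined in sys/space_map.h at this revision: with shift = 9 and
 * SPA_FEATURE_SPACEMAP_V2 disabled, single_entry_max_bucket is
 * 15 + 9 - 1 = 23. A segment in bucket 23 or below costs one word,
 * while a segment in bucket 30 costs 2^(30 - 23) = 128 one-word
 * entries, i.e. 1KB of space map for that one segment. This is the
 * kind of inflation the two-word format's much wider run field
 * (SM2_RUN_BITS) avoids.
 */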

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}