xref: /illumos-gate/usr/src/uts/common/fs/zfs/dbuf.c (revision 45818ee124adeaaf947698996b4f4c722afc6d1f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28  */
29 
30 #include <sys/zfs_context.h>
31 #include <sys/dmu.h>
32 #include <sys/dmu_send.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dbuf.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/spa.h>
40 #include <sys/zio.h>
41 #include <sys/dmu_zfetch.h>
42 #include <sys/sa.h>
43 #include <sys/sa_impl.h>
44 #include <sys/zfeature.h>
45 #include <sys/blkptr.h>
46 #include <sys/range_tree.h>
47 
48 /*
49  * Number of times that zfs_free_range() took the slow path while doing
50  * a zfs receive.  A nonzero value indicates a potential performance problem.
51  */
52 uint64_t zfs_free_range_recv_miss;
53 
54 static void dbuf_destroy(dmu_buf_impl_t *db);
55 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
56 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
57 
58 #ifndef __lint
59 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
60     dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
61 #endif /* ! __lint */
62 
63 /*
64  * Global data structures and functions for the dbuf cache.
65  */
66 static kmem_cache_t *dbuf_cache;
67 static taskq_t *dbu_evict_taskq;
68 
69 /* ARGSUSED */
70 static int
71 dbuf_cons(void *vdb, void *unused, int kmflag)
72 {
73 	dmu_buf_impl_t *db = vdb;
74 	bzero(db, sizeof (dmu_buf_impl_t));
75 
76 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
77 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
78 	refcount_create(&db->db_holds);
79 
80 	return (0);
81 }
82 
83 /* ARGSUSED */
84 static void
85 dbuf_dest(void *vdb, void *unused)
86 {
87 	dmu_buf_impl_t *db = vdb;
88 	mutex_destroy(&db->db_mtx);
89 	cv_destroy(&db->db_changed);
90 	refcount_destroy(&db->db_holds);
91 }
92 
93 /*
94  * dbuf hash table routines
95  */
96 static dbuf_hash_table_t dbuf_hash_table;
97 
98 static uint64_t dbuf_hash_count;
99 
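/*
 * Hash an (objset, object, level, blkid) tuple into the dbuf hash table.
 * Each field is folded, a byte at a time, through the ZFS CRC-64 table,
 * and the high-order bits of each field are XORed back in at the end so
 * that they still influence the bucket selection.
 */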
100 static uint64_t
101 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
102 {
103 	uintptr_t osv = (uintptr_t)os;
104 	uint64_t crc = -1ULL;
105 
106 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
107 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
108 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
109 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
110 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
111 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
112 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
113 
114 	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
115 
116 	return (crc);
117 }
118 
119 #define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
120 
121 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
122 	((dbuf)->db.db_object == (obj) &&		\
123 	(dbuf)->db_objset == (os) &&			\
124 	(dbuf)->db_level == (level) &&			\
125 	(dbuf)->db_blkid == (blkid))
126 
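/*
 * Look up a dbuf in the hash table.  If a matching dbuf is found and is
 * not being evicted, return it with its db_mtx held; otherwise return
 * NULL.
 */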
127 dmu_buf_impl_t *
128 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
129 {
130 	dbuf_hash_table_t *h = &dbuf_hash_table;
131 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
132 	uint64_t idx = hv & h->hash_table_mask;
133 	dmu_buf_impl_t *db;
134 
135 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
136 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
137 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
138 			mutex_enter(&db->db_mtx);
139 			if (db->db_state != DB_EVICTING) {
140 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
141 				return (db);
142 			}
143 			mutex_exit(&db->db_mtx);
144 		}
145 	}
146 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
147 	return (NULL);
148 }
149 
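/*
 * Return the bonus dbuf for the given object if one has been instantiated,
 * with its db_mtx held; otherwise return NULL.  The dnode is held only for
 * the duration of the lookup.
 */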
150 static dmu_buf_impl_t *
151 dbuf_find_bonus(objset_t *os, uint64_t object)
152 {
153 	dnode_t *dn;
154 	dmu_buf_impl_t *db = NULL;
155 
156 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
157 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
158 		if (dn->dn_bonus != NULL) {
159 			db = dn->dn_bonus;
160 			mutex_enter(&db->db_mtx);
161 		}
162 		rw_exit(&dn->dn_struct_rwlock);
163 		dnode_rele(dn, FTAG);
164 	}
165 	return (db);
166 }
167 
168 /*
169  * Insert an entry into the hash table.  If there is already an element
170  * equal to elem in the hash table, then the already existing element
171  * will be returned and the new element will not be inserted.
172  * Otherwise returns NULL.
173  */
174 static dmu_buf_impl_t *
175 dbuf_hash_insert(dmu_buf_impl_t *db)
176 {
177 	dbuf_hash_table_t *h = &dbuf_hash_table;
178 	objset_t *os = db->db_objset;
179 	uint64_t obj = db->db.db_object;
180 	int level = db->db_level;
181 	uint64_t blkid = db->db_blkid;
182 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
183 	uint64_t idx = hv & h->hash_table_mask;
184 	dmu_buf_impl_t *dbf;
185 
186 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
187 	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
188 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
189 			mutex_enter(&dbf->db_mtx);
190 			if (dbf->db_state != DB_EVICTING) {
191 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
192 				return (dbf);
193 			}
194 			mutex_exit(&dbf->db_mtx);
195 		}
196 	}
197 
198 	mutex_enter(&db->db_mtx);
199 	db->db_hash_next = h->hash_table[idx];
200 	h->hash_table[idx] = db;
201 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
202 	atomic_inc_64(&dbuf_hash_count);
203 
204 	return (NULL);
205 }
206 
207 /*
208  * Remove an entry from the hash table.  It must be in the EVICTING state.
209  */
210 static void
211 dbuf_hash_remove(dmu_buf_impl_t *db)
212 {
213 	dbuf_hash_table_t *h = &dbuf_hash_table;
214 	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
215 	    db->db_level, db->db_blkid);
216 	uint64_t idx = hv & h->hash_table_mask;
217 	dmu_buf_impl_t *dbf, **dbp;
218 
219 	/*
220 	 * We mustn't hold db_mtx to maintain lock ordering:
221 	 * DBUF_HASH_MUTEX > db_mtx.
222 	 */
223 	ASSERT(refcount_is_zero(&db->db_holds));
224 	ASSERT(db->db_state == DB_EVICTING);
225 	ASSERT(!MUTEX_HELD(&db->db_mtx));
226 
227 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
228 	dbp = &h->hash_table[idx];
229 	while ((dbf = *dbp) != db) {
230 		ASSERT(dbf != NULL);
231 		dbp = &dbf->db_hash_next;
232 	}
233 	*dbp = db->db_hash_next;
234 	db->db_hash_next = NULL;
235 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
236 	atomic_dec_64(&dbuf_hash_count);
237 }
238 
239 static arc_evict_func_t dbuf_do_evict;
240 
241 typedef enum {
242 	DBVU_EVICTING,
243 	DBVU_NOT_EVICTING
244 } dbvu_verify_type_t;
245 
246 static void
247 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
248 {
249 #ifdef ZFS_DEBUG
250 	int64_t holds;
251 
252 	if (db->db_user == NULL)
253 		return;
254 
255 	/* Only data blocks support the attachment of user data. */
256 	ASSERT(db->db_level == 0);
257 
258 	/* Clients must resolve a dbuf before attaching user data. */
259 	ASSERT(db->db.db_data != NULL);
260 	ASSERT3U(db->db_state, ==, DB_CACHED);
261 
262 	holds = refcount_count(&db->db_holds);
263 	if (verify_type == DBVU_EVICTING) {
264 		/*
265 		 * Immediate eviction occurs when holds == dirtycnt.
266 		 * For normal eviction buffers, holds is zero on
267 		 * eviction, except when dbuf_fix_old_data() calls
268 		 * dbuf_clear_data().  However, the hold count can grow
269 		 * during eviction even though db_mtx is held (see
270 		 * dmu_bonus_hold() for an example), so we can only
271 		 * test the generic invariant that holds >= dirtycnt.
272 		 */
273 		ASSERT3U(holds, >=, db->db_dirtycnt);
274 	} else {
275 		if (db->db_immediate_evict == TRUE)
276 			ASSERT3U(holds, >=, db->db_dirtycnt);
277 		else
278 			ASSERT3U(holds, >, 0);
279 	}
280 #endif
281 }
282 
283 static void
284 dbuf_evict_user(dmu_buf_impl_t *db)
285 {
286 	dmu_buf_user_t *dbu = db->db_user;
287 
288 	ASSERT(MUTEX_HELD(&db->db_mtx));
289 
290 	if (dbu == NULL)
291 		return;
292 
293 	dbuf_verify_user(db, DBVU_EVICTING);
294 	db->db_user = NULL;
295 
296 #ifdef ZFS_DEBUG
297 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
298 		*dbu->dbu_clear_on_evict_dbufp = NULL;
299 #endif
300 
301 	/*
302 	 * Invoke the callback from a taskq to avoid lock order reversals
303 	 * and limit stack depth.
304 	 */
305 	taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
306 	    &dbu->dbu_tqent);
307 }
308 
309 boolean_t
310 dbuf_is_metadata(dmu_buf_impl_t *db)
311 {
312 	if (db->db_level > 0) {
313 		return (B_TRUE);
314 	} else {
315 		boolean_t is_metadata;
316 
317 		DB_DNODE_ENTER(db);
318 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
319 		DB_DNODE_EXIT(db);
320 
321 		return (is_metadata);
322 	}
323 }
324 
325 void
326 dbuf_evict(dmu_buf_impl_t *db)
327 {
328 	ASSERT(MUTEX_HELD(&db->db_mtx));
329 	ASSERT(db->db_buf == NULL);
330 	ASSERT(db->db_data_pending == NULL);
331 
332 	dbuf_clear(db);
333 	dbuf_destroy(db);
334 }
335 
336 void
337 dbuf_init(void)
338 {
339 	uint64_t hsize = 1ULL << 16;
340 	dbuf_hash_table_t *h = &dbuf_hash_table;
341 	int i;
342 
343 	/*
344 	 * The hash table is big enough to fill all of physical memory
345 	 * with an average 4K block size.  The table will take up
346 	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
347 	 */
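	/*
	 * Worked example (illustrative figures): with 8GB of physical
	 * memory, hsize doubles from 2^16 up to 2^21 buckets (8GB / 4KB),
	 * i.e. a 16MB table of 8-byte pointers -- consistent with the
	 * 2MB/GB estimate above.
	 */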
348 	while (hsize * 4096 < physmem * PAGESIZE)
349 		hsize <<= 1;
350 
351 retry:
352 	h->hash_table_mask = hsize - 1;
353 	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
354 	if (h->hash_table == NULL) {
355 		/* XXX - we should really return an error instead of assert */
356 		ASSERT(hsize > (1ULL << 10));
357 		hsize >>= 1;
358 		goto retry;
359 	}
360 
361 	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
362 	    sizeof (dmu_buf_impl_t),
363 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
364 
365 	for (i = 0; i < DBUF_MUTEXES; i++)
366 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
367 
368 	/*
369 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
370 	 * configuration is not required.
371 	 */
372 	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);
373 }
374 
375 void
376 dbuf_fini(void)
377 {
378 	dbuf_hash_table_t *h = &dbuf_hash_table;
379 	int i;
380 
381 	for (i = 0; i < DBUF_MUTEXES; i++)
382 		mutex_destroy(&h->hash_mutexes[i]);
383 	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
384 	kmem_cache_destroy(dbuf_cache);
385 	taskq_destroy(dbu_evict_taskq);
386 }
387 
388 /*
389  * Other stuff.
390  */
391 
392 #ifdef ZFS_DEBUG
393 static void
394 dbuf_verify(dmu_buf_impl_t *db)
395 {
396 	dnode_t *dn;
397 	dbuf_dirty_record_t *dr;
398 
399 	ASSERT(MUTEX_HELD(&db->db_mtx));
400 
401 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
402 		return;
403 
404 	ASSERT(db->db_objset != NULL);
405 	DB_DNODE_ENTER(db);
406 	dn = DB_DNODE(db);
407 	if (dn == NULL) {
408 		ASSERT(db->db_parent == NULL);
409 		ASSERT(db->db_blkptr == NULL);
410 	} else {
411 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
412 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
413 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
414 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
415 		    db->db_blkid == DMU_SPILL_BLKID ||
416 		    !avl_is_empty(&dn->dn_dbufs));
417 	}
418 	if (db->db_blkid == DMU_BONUS_BLKID) {
419 		ASSERT(dn != NULL);
420 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
421 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
422 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
423 		ASSERT(dn != NULL);
424 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
425 		ASSERT0(db->db.db_offset);
426 	} else {
427 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
428 	}
429 
430 	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
431 		ASSERT(dr->dr_dbuf == db);
432 
433 	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
434 		ASSERT(dr->dr_dbuf == db);
435 
436 	/*
437 	 * We can't assert that db_size matches dn_datablksz because it
438 	 * can be momentarily different when another thread is doing
439 	 * dnode_set_blksz().
440 	 */
441 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
442 		dr = db->db_data_pending;
443 		/*
444 		 * It should only be modified in syncing context, so
445 		 * make sure we only have one copy of the data.
446 		 */
447 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
448 	}
449 
450 	/* verify db->db_blkptr */
451 	if (db->db_blkptr) {
452 		if (db->db_parent == dn->dn_dbuf) {
453 			/* db is pointed to by the dnode */
454 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
455 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
456 				ASSERT(db->db_parent == NULL);
457 			else
458 				ASSERT(db->db_parent != NULL);
459 			if (db->db_blkid != DMU_SPILL_BLKID)
460 				ASSERT3P(db->db_blkptr, ==,
461 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
462 		} else {
463 			/* db is pointed to by an indirect block */
464 			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
465 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
466 			ASSERT3U(db->db_parent->db.db_object, ==,
467 			    db->db.db_object);
468 			/*
469 			 * dnode_grow_indblksz() can make this fail if we don't
470 			 * have the struct_rwlock.  XXX indblksz no longer
471 			 * grows.  safe to do this now?
472 			 */
473 			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
474 				ASSERT3P(db->db_blkptr, ==,
475 				    ((blkptr_t *)db->db_parent->db.db_data +
476 				    db->db_blkid % epb));
477 			}
478 		}
479 	}
480 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
481 	    (db->db_buf == NULL || db->db_buf->b_data) &&
482 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
483 	    db->db_state != DB_FILL && !dn->dn_free_txg) {
484 		/*
485 		 * If the blkptr isn't set but the buffer contains nonzero data,
486 		 * it had better be dirty, otherwise we'll lose that
487 		 * data when we evict this buffer.
488 		 */
489 		if (db->db_dirtycnt == 0) {
490 			uint64_t *buf = db->db.db_data;
491 			int i;
492 
493 			for (i = 0; i < db->db.db_size >> 3; i++) {
494 				ASSERT(buf[i] == 0);
495 			}
496 		}
497 	}
498 	DB_DNODE_EXIT(db);
499 }
500 #endif
501 
502 static void
503 dbuf_clear_data(dmu_buf_impl_t *db)
504 {
505 	ASSERT(MUTEX_HELD(&db->db_mtx));
506 	dbuf_evict_user(db);
507 	db->db_buf = NULL;
508 	db->db.db_data = NULL;
509 	if (db->db_state != DB_NOFILL)
510 		db->db_state = DB_UNCACHED;
511 }
512 
513 static void
514 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
515 {
516 	ASSERT(MUTEX_HELD(&db->db_mtx));
517 	ASSERT(buf != NULL);
518 
519 	db->db_buf = buf;
520 	ASSERT(buf->b_data != NULL);
521 	db->db.db_data = buf->b_data;
522 	if (!arc_released(buf))
523 		arc_set_callback(buf, dbuf_do_evict, db);
524 }
525 
526 /*
527  * Loan out an arc_buf for read.  Return the loaned arc_buf.
528  */
529 arc_buf_t *
530 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
531 {
532 	arc_buf_t *abuf;
533 
534 	mutex_enter(&db->db_mtx);
535 	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
536 		int blksz = db->db.db_size;
537 		spa_t *spa = db->db_objset->os_spa;
538 
539 		mutex_exit(&db->db_mtx);
540 		abuf = arc_loan_buf(spa, blksz);
541 		bcopy(db->db.db_data, abuf->b_data, blksz);
542 	} else {
543 		abuf = db->db_buf;
544 		arc_loan_inuse_buf(abuf, db);
545 		dbuf_clear_data(db);
546 		mutex_exit(&db->db_mtx);
547 	}
548 	return (abuf);
549 }
550 
551 /*
552  * Calculate which level n block references the data at the level 0 offset
553  * provided.
554  */
555 uint64_t
556 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
557 {
558 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
559 		/*
560 		 * The level n blkid is equal to the level 0 blkid divided by
561 		 * the number of level 0s in a level n block.
562 		 *
563 		 * The level 0 blkid is offset >> datablkshift =
564 		 * offset / 2^datablkshift.
565 		 *
566 		 * The number of level 0s in a level n is the number of block
567 		 * pointers in an indirect block, raised to the power of level.
568 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
569 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
570 		 *
571 		 * Thus, the level n blkid is: offset /
572 		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
573 		 * = offset / 2^(datablkshift + level *
574 		 *   (indblkshift - SPA_BLKPTRSHIFT))
575 		 * = offset >> (datablkshift + level *
576 		 *   (indblkshift - SPA_BLKPTRSHIFT))
577 		 */
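		/*
		 * Worked example (values assumed for illustration): with
		 * 128K data blocks (datablkshift = 17) and the default 16K
		 * indirect blocks (indblkshift = 14, SPA_BLKPTRSHIFT = 7,
		 * so 128 block pointers per indirect block), the level 1
		 * blkid for offset 0x12345678 is
		 * 0x12345678 >> (17 + 1 * 7) = 0x12.
		 */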
578 		return (offset >> (dn->dn_datablkshift + level *
579 		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
580 	} else {
581 		ASSERT3U(offset, <, dn->dn_datablksz);
582 		return (0);
583 	}
584 }
585 
586 static void
587 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
588 {
589 	dmu_buf_impl_t *db = vdb;
590 
591 	mutex_enter(&db->db_mtx);
592 	ASSERT3U(db->db_state, ==, DB_READ);
593 	/*
594 	 * All reads are synchronous, so we must have a hold on the dbuf
595 	 */
596 	ASSERT(refcount_count(&db->db_holds) > 0);
597 	ASSERT(db->db_buf == NULL);
598 	ASSERT(db->db.db_data == NULL);
599 	if (db->db_level == 0 && db->db_freed_in_flight) {
600 		/* we were freed in flight; disregard any error */
601 		arc_release(buf, db);
602 		bzero(buf->b_data, db->db.db_size);
603 		arc_buf_freeze(buf);
604 		db->db_freed_in_flight = FALSE;
605 		dbuf_set_data(db, buf);
606 		db->db_state = DB_CACHED;
607 	} else if (zio == NULL || zio->io_error == 0) {
608 		dbuf_set_data(db, buf);
609 		db->db_state = DB_CACHED;
610 	} else {
611 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
612 		ASSERT3P(db->db_buf, ==, NULL);
613 		VERIFY(arc_buf_remove_ref(buf, db));
614 		db->db_state = DB_UNCACHED;
615 	}
616 	cv_broadcast(&db->db_changed);
617 	dbuf_rele_and_unlock(db, NULL);
618 }
619 
620 static void
621 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
622 {
623 	dnode_t *dn;
624 	zbookmark_phys_t zb;
625 	arc_flags_t aflags = ARC_FLAG_NOWAIT;
626 
627 	DB_DNODE_ENTER(db);
628 	dn = DB_DNODE(db);
629 	ASSERT(!refcount_is_zero(&db->db_holds));
630 	/* We need the struct_rwlock to prevent db_blkptr from changing. */
631 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
632 	ASSERT(MUTEX_HELD(&db->db_mtx));
633 	ASSERT(db->db_state == DB_UNCACHED);
634 	ASSERT(db->db_buf == NULL);
635 
636 	if (db->db_blkid == DMU_BONUS_BLKID) {
637 		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
638 
639 		ASSERT3U(bonuslen, <=, db->db.db_size);
640 		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
641 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
642 		if (bonuslen < DN_MAX_BONUSLEN)
643 			bzero(db->db.db_data, DN_MAX_BONUSLEN);
644 		if (bonuslen)
645 			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
646 		DB_DNODE_EXIT(db);
647 		db->db_state = DB_CACHED;
648 		mutex_exit(&db->db_mtx);
649 		return;
650 	}
651 
652 	/*
653 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
654 	 * processes the delete record and clears the bp while we are waiting
655 	 * for the dn_mtx (resulting in a "no" from block_freed).
656 	 */
657 	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
658 	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
659 	    BP_IS_HOLE(db->db_blkptr)))) {
660 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
661 
662 		DB_DNODE_EXIT(db);
663 		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
664 		    db->db.db_size, db, type));
665 		bzero(db->db.db_data, db->db.db_size);
666 		db->db_state = DB_CACHED;
667 		mutex_exit(&db->db_mtx);
668 		return;
669 	}
670 
671 	DB_DNODE_EXIT(db);
672 
673 	db->db_state = DB_READ;
674 	mutex_exit(&db->db_mtx);
675 
676 	if (DBUF_IS_L2CACHEABLE(db))
677 		aflags |= ARC_FLAG_L2CACHE;
678 	if (DBUF_IS_L2COMPRESSIBLE(db))
679 		aflags |= ARC_FLAG_L2COMPRESS;
680 
681 	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
682 	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
683 	    db->db.db_object, db->db_level, db->db_blkid);
684 
685 	dbuf_add_ref(db, NULL);
686 
687 	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
688 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
689 	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
690 	    &aflags, &zb);
691 }
692 
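/*
 * Read the contents of this dbuf.  "zio" may be a parent zio to attach
 * the read to, or NULL, in which case a root zio is created and waited
 * on here.  The DB_RF_* flags control prefetching, error handling
 * (DB_RF_CANFAIL), waiting (DB_RF_NEVERWAIT), and whether the caller
 * already holds dn_struct_rwlock (DB_RF_HAVESTRUCT).
 */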
693 int
694 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
695 {
696 	int err = 0;
697 	boolean_t havepzio = (zio != NULL);
698 	boolean_t prefetch;
699 	dnode_t *dn;
700 
701 	/*
702 	 * We don't have to hold the mutex to check db_state because it
703 	 * can't be freed while we have a hold on the buffer.
704 	 */
705 	ASSERT(!refcount_is_zero(&db->db_holds));
706 
707 	if (db->db_state == DB_NOFILL)
708 		return (SET_ERROR(EIO));
709 
710 	DB_DNODE_ENTER(db);
711 	dn = DB_DNODE(db);
712 	if ((flags & DB_RF_HAVESTRUCT) == 0)
713 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
714 
715 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
716 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
717 	    DBUF_IS_CACHEABLE(db);
718 
719 	mutex_enter(&db->db_mtx);
720 	if (db->db_state == DB_CACHED) {
721 		mutex_exit(&db->db_mtx);
722 		if (prefetch)
723 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
724 		if ((flags & DB_RF_HAVESTRUCT) == 0)
725 			rw_exit(&dn->dn_struct_rwlock);
726 		DB_DNODE_EXIT(db);
727 	} else if (db->db_state == DB_UNCACHED) {
728 		spa_t *spa = dn->dn_objset->os_spa;
729 
730 		if (zio == NULL)
731 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
732 		dbuf_read_impl(db, zio, flags);
733 
734 		/* dbuf_read_impl has dropped db_mtx for us */
735 
736 		if (prefetch)
737 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
738 
739 		if ((flags & DB_RF_HAVESTRUCT) == 0)
740 			rw_exit(&dn->dn_struct_rwlock);
741 		DB_DNODE_EXIT(db);
742 
743 		if (!havepzio)
744 			err = zio_wait(zio);
745 	} else {
746 		/*
747 		 * Another reader came in while the dbuf was in flight
748 		 * between UNCACHED and CACHED.  Either a writer will finish
749 		 * writing the buffer (sending the dbuf to CACHED) or the
750 		 * first reader's request will reach the read_done callback
751 		 * and send the dbuf to CACHED.  Otherwise, a failure
752 		 * occurred and the dbuf went to UNCACHED.
753 		 */
754 		mutex_exit(&db->db_mtx);
755 		if (prefetch)
756 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
757 		if ((flags & DB_RF_HAVESTRUCT) == 0)
758 			rw_exit(&dn->dn_struct_rwlock);
759 		DB_DNODE_EXIT(db);
760 
761 		/* Wait for the read to complete unless DB_RF_NEVERWAIT was given. */
762 		mutex_enter(&db->db_mtx);
763 		if ((flags & DB_RF_NEVERWAIT) == 0) {
764 			while (db->db_state == DB_READ ||
765 			    db->db_state == DB_FILL) {
766 				ASSERT(db->db_state == DB_READ ||
767 				    (flags & DB_RF_HAVESTRUCT) == 0);
768 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
769 				    db, zio_t *, zio);
770 				cv_wait(&db->db_changed, &db->db_mtx);
771 			}
772 			if (db->db_state == DB_UNCACHED)
773 				err = SET_ERROR(EIO);
774 		}
775 		mutex_exit(&db->db_mtx);
776 	}
777 
778 	ASSERT(err || havepzio || db->db_state == DB_CACHED);
779 	return (err);
780 }
781 
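/*
 * Prepare this dbuf to be entirely overwritten without reading its current
 * contents: wait out any in-flight READ/FILL, then install a fresh buffer
 * and move the dbuf to DB_FILL (or clear the data for DB_NOFILL dbufs).
 */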
782 static void
783 dbuf_noread(dmu_buf_impl_t *db)
784 {
785 	ASSERT(!refcount_is_zero(&db->db_holds));
786 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
787 	mutex_enter(&db->db_mtx);
788 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
789 		cv_wait(&db->db_changed, &db->db_mtx);
790 	if (db->db_state == DB_UNCACHED) {
791 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
792 		spa_t *spa = db->db_objset->os_spa;
793 
794 		ASSERT(db->db_buf == NULL);
795 		ASSERT(db->db.db_data == NULL);
796 		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
797 		db->db_state = DB_FILL;
798 	} else if (db->db_state == DB_NOFILL) {
799 		dbuf_clear_data(db);
800 	} else {
801 		ASSERT3U(db->db_state, ==, DB_CACHED);
802 	}
803 	mutex_exit(&db->db_mtx);
804 }
805 
806 /*
807  * This is our just-in-time copy function.  It makes a copy of
808  * buffers that have been modified in a previous transaction
809  * group before we modify them in the current active group.
810  *
811  * This function is used in two places: when we are dirtying a
812  * buffer for the first time in a txg, and when we are freeing
813  * a range in a dnode that includes this buffer.
814  *
815  * Note that when we are called from dbuf_free_range() we do
816  * not put a hold on the buffer, we just traverse the active
817  * dbuf list for the dnode.
818  */
819 static void
820 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
821 {
822 	dbuf_dirty_record_t *dr = db->db_last_dirty;
823 
824 	ASSERT(MUTEX_HELD(&db->db_mtx));
825 	ASSERT(db->db.db_data != NULL);
826 	ASSERT(db->db_level == 0);
827 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
828 
829 	if (dr == NULL ||
830 	    (dr->dt.dl.dr_data !=
831 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
832 		return;
833 
834 	/*
835 	 * If the last dirty record for this dbuf has not yet synced
836 	 * and it's referencing the dbuf data, either:
837 	 *	reset the reference to point to a new copy,
838 	 * or (if there are no active holders)
839 	 *	just null out the current db_data pointer.
840 	 */
841 	ASSERT(dr->dr_txg >= txg - 2);
842 	if (db->db_blkid == DMU_BONUS_BLKID) {
843 		/* Note that the data bufs here are zio_bufs */
844 		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
845 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
846 		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
847 	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
848 		int size = db->db.db_size;
849 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
850 		spa_t *spa = db->db_objset->os_spa;
851 
852 		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
853 		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
854 	} else {
855 		dbuf_clear_data(db);
856 	}
857 }
858 
859 void
860 dbuf_unoverride(dbuf_dirty_record_t *dr)
861 {
862 	dmu_buf_impl_t *db = dr->dr_dbuf;
863 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
864 	uint64_t txg = dr->dr_txg;
865 
866 	ASSERT(MUTEX_HELD(&db->db_mtx));
867 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
868 	ASSERT(db->db_level == 0);
869 
870 	if (db->db_blkid == DMU_BONUS_BLKID ||
871 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
872 		return;
873 
874 	ASSERT(db->db_data_pending != dr);
875 
876 	/* free this block */
877 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
878 		zio_free(db->db_objset->os_spa, txg, bp);
879 
880 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
881 	dr->dt.dl.dr_nopwrite = B_FALSE;
882 
883 	/*
884 	 * Release the already-written buffer, so we leave it in
885 	 * a consistent dirty state.  Note that all callers are
886 	 * modifying the buffer, so they will immediately do
887 	 * another (redundant) arc_release().  Therefore, leave
888 	 * the buf thawed to save the effort of freezing &
889 	 * immediately re-thawing it.
890 	 */
891 	arc_release(dr->dt.dl.dr_data, db);
892 }
893 
894 /*
895  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
896  * data blocks in the free range, so that any future readers will find
897  * empty blocks.
898  *
899  * This is a no-op if the dataset is in the middle of an incremental
900  * receive; see comment below for details.
901  */
902 void
903 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
904     dmu_tx_t *tx)
905 {
906 	dmu_buf_impl_t db_search;
907 	dmu_buf_impl_t *db, *db_next;
908 	uint64_t txg = tx->tx_txg;
909 	avl_index_t where;
910 
911 	if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
912 		end_blkid = dn->dn_maxblkid;
913 	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
914 
915 	db_search.db_level = 0;
916 	db_search.db_blkid = start_blkid;
917 	db_search.db_state = DB_SEARCH;
918 
919 	mutex_enter(&dn->dn_dbufs_mtx);
920 	if (start_blkid >= dn->dn_unlisted_l0_blkid) {
921 		/* There can't be any dbufs in this range; no need to search. */
922 #ifdef DEBUG
923 		db = avl_find(&dn->dn_dbufs, &db_search, &where);
924 		ASSERT3P(db, ==, NULL);
925 		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
926 		ASSERT(db == NULL || db->db_level > 0);
927 #endif
928 		mutex_exit(&dn->dn_dbufs_mtx);
929 		return;
930 	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
931 		/*
932 		 * If we are receiving, we expect there to be no dbufs in
933 		 * the range to be freed, because receive modifies each
934 		 * block at most once, and in offset order.  If this is
935 		 * not the case, it can lead to performance problems,
936 		 * so note that we unexpectedly took the slow path.
937 		 */
938 		atomic_inc_64(&zfs_free_range_recv_miss);
939 	}
940 
941 	db = avl_find(&dn->dn_dbufs, &db_search, &where);
942 	ASSERT3P(db, ==, NULL);
943 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
944 
945 	for (; db != NULL; db = db_next) {
946 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
947 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
948 
949 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
950 			break;
951 		}
952 		ASSERT3U(db->db_blkid, >=, start_blkid);
953 
954 		/* found a level 0 buffer in the range */
955 		mutex_enter(&db->db_mtx);
956 		if (dbuf_undirty(db, tx)) {
957 			/* mutex has been dropped and dbuf destroyed */
958 			continue;
959 		}
960 
961 		if (db->db_state == DB_UNCACHED ||
962 		    db->db_state == DB_NOFILL ||
963 		    db->db_state == DB_EVICTING) {
964 			ASSERT(db->db.db_data == NULL);
965 			mutex_exit(&db->db_mtx);
966 			continue;
967 		}
968 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
969 			/* will be handled in dbuf_read_done or dbuf_rele */
970 			db->db_freed_in_flight = TRUE;
971 			mutex_exit(&db->db_mtx);
972 			continue;
973 		}
974 		if (refcount_count(&db->db_holds) == 0) {
975 			ASSERT(db->db_buf);
976 			dbuf_clear(db);
977 			continue;
978 		}
979 		/* The dbuf is referenced */
980 
981 		if (db->db_last_dirty != NULL) {
982 			dbuf_dirty_record_t *dr = db->db_last_dirty;
983 
984 			if (dr->dr_txg == txg) {
985 				/*
986 				 * This buffer is "in-use", re-adjust the file
987 				 * size to reflect that this buffer may
988 				 * contain new data when we sync.
989 				 */
990 				if (db->db_blkid != DMU_SPILL_BLKID &&
991 				    db->db_blkid > dn->dn_maxblkid)
992 					dn->dn_maxblkid = db->db_blkid;
993 				dbuf_unoverride(dr);
994 			} else {
995 				/*
996 				 * This dbuf is not dirty in the open context.
997 				 * Either uncache it (if it's not referenced in
998 				 * the open context) or reset its contents to
999 				 * empty.
1000 				 */
1001 				dbuf_fix_old_data(db, txg);
1002 			}
1003 		}
1004 		/* clear the contents if it's cached */
1005 		if (db->db_state == DB_CACHED) {
1006 			ASSERT(db->db.db_data != NULL);
1007 			arc_release(db->db_buf, db);
1008 			bzero(db->db.db_data, db->db.db_size);
1009 			arc_buf_freeze(db->db_buf);
1010 		}
1011 
1012 		mutex_exit(&db->db_mtx);
1013 	}
1014 	mutex_exit(&dn->dn_dbufs_mtx);
1015 }
1016 
1017 static int
1018 dbuf_block_freeable(dmu_buf_impl_t *db)
1019 {
1020 	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
1021 	uint64_t birth_txg = 0;
1022 
1023 	/*
1024 	 * We don't need any locking to protect db_blkptr:
1025 	 * If it's syncing, then db_last_dirty will be set
1026 	 * so we'll ignore db_blkptr.
1027 	 *
1028 	 * This logic ensures that only block births for
1029 	 * filled blocks are considered.
1030 	 */
1031 	ASSERT(MUTEX_HELD(&db->db_mtx));
1032 	if (db->db_last_dirty && (db->db_blkptr == NULL ||
1033 	    !BP_IS_HOLE(db->db_blkptr))) {
1034 		birth_txg = db->db_last_dirty->dr_txg;
1035 	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1036 		birth_txg = db->db_blkptr->blk_birth;
1037 	}
1038 
1039 	/*
1040 	 * If this block doesn't exist or is in a snapshot, it can't be freed.
1041 	 * Don't pass the bp to dsl_dataset_block_freeable() since we
1042 	 * are holding the db_mtx lock and might deadlock if we are
1043 	 * prefetching a dedup-ed block.
1044 	 */
1045 	if (birth_txg != 0)
1046 		return (ds == NULL ||
1047 		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
1048 	else
1049 		return (B_FALSE);
1050 }
1051 
1052 void
1053 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1054 {
1055 	arc_buf_t *buf, *obuf;
1056 	int osize = db->db.db_size;
1057 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1058 	dnode_t *dn;
1059 
1060 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1061 
1062 	DB_DNODE_ENTER(db);
1063 	dn = DB_DNODE(db);
1064 
1065 	/* XXX does *this* func really need the lock? */
1066 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1067 
1068 	/*
1069 	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
1070 	 * is OK, because there can be no other references to the db
1071 	 * when we are changing its size, so no concurrent DB_FILL can
1072 	 * be happening.
1073 	 */
1074 	/*
1075 	 * XXX we should be doing a dbuf_read, checking the return
1076 	 * value and returning that up to our callers
1077 	 */
1078 	dmu_buf_will_dirty(&db->db, tx);
1079 
1080 	/* create the data buffer for the new block */
1081 	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
1082 
1083 	/* copy old block data to the new block */
1084 	obuf = db->db_buf;
1085 	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1086 	/* zero the remainder */
1087 	if (size > osize)
1088 		bzero((uint8_t *)buf->b_data + osize, size - osize);
1089 
1090 	mutex_enter(&db->db_mtx);
1091 	dbuf_set_data(db, buf);
1092 	VERIFY(arc_buf_remove_ref(obuf, db));
1093 	db->db.db_size = size;
1094 
1095 	if (db->db_level == 0) {
1096 		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1097 		db->db_last_dirty->dt.dl.dr_data = buf;
1098 	}
1099 	mutex_exit(&db->db_mtx);
1100 
1101 	dnode_willuse_space(dn, size-osize, tx);
1102 	DB_DNODE_EXIT(db);
1103 }
1104 
1105 void
1106 dbuf_release_bp(dmu_buf_impl_t *db)
1107 {
1108 	objset_t *os = db->db_objset;
1109 
1110 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1111 	ASSERT(arc_released(os->os_phys_buf) ||
1112 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
1113 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1114 
1115 	(void) arc_release(db->db_buf, db);
1116 }
1117 
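/*
 * Mark this dbuf as dirty in the given transaction's txg and return the
 * corresponding dirty record, creating one if this is the first
 * modification in that txg.  Level-0 buffers get a private copy of their
 * data when needed (dbuf_fix_old_data()); the parent indirect (or the
 * dnode itself) is then dirtied recursively so the change propagates to
 * the root.
 */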
1118 dbuf_dirty_record_t *
1119 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1120 {
1121 	dnode_t *dn;
1122 	objset_t *os;
1123 	dbuf_dirty_record_t **drp, *dr;
1124 	int drop_struct_lock = FALSE;
1125 	boolean_t do_free_accounting = B_FALSE;
1126 	int txgoff = tx->tx_txg & TXG_MASK;
1127 
1128 	ASSERT(tx->tx_txg != 0);
1129 	ASSERT(!refcount_is_zero(&db->db_holds));
1130 	DMU_TX_DIRTY_BUF(tx, db);
1131 
1132 	DB_DNODE_ENTER(db);
1133 	dn = DB_DNODE(db);
1134 	/*
1135 	 * Shouldn't dirty a regular buffer in syncing context.  Private
1136 	 * objects may be dirtied in syncing context, but only if they
1137 	 * were already pre-dirtied in open context.
1138 	 */
1139 	ASSERT(!dmu_tx_is_syncing(tx) ||
1140 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1141 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1142 	    dn->dn_objset->os_dsl_dataset == NULL);
1143 	/*
1144 	 * We make this assert for private objects as well, but after we
1145 	 * check if we're already dirty.  They are allowed to re-dirty
1146 	 * in syncing context.
1147 	 */
1148 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1149 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1150 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1151 
1152 	mutex_enter(&db->db_mtx);
1153 	/*
1154 	 * XXX make this true for indirects too?  The problem is that
1155 	 * transactions created with dmu_tx_create_assigned() from
1156 	 * syncing context don't bother holding ahead.
1157 	 */
1158 	ASSERT(db->db_level != 0 ||
1159 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1160 	    db->db_state == DB_NOFILL);
1161 
1162 	mutex_enter(&dn->dn_mtx);
1163 	/*
1164 	 * Don't set dirtyctx to SYNC if we're just modifying this as we
1165 	 * initialize the objset.
1166 	 */
1167 	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1168 	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1169 		dn->dn_dirtyctx =
1170 		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1171 		ASSERT(dn->dn_dirtyctx_firstset == NULL);
1172 		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1173 	}
1174 	mutex_exit(&dn->dn_mtx);
1175 
1176 	if (db->db_blkid == DMU_SPILL_BLKID)
1177 		dn->dn_have_spill = B_TRUE;
1178 
1179 	/*
1180 	 * If this buffer is already dirty, we're done.
1181 	 */
1182 	drp = &db->db_last_dirty;
1183 	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1184 	    db->db.db_object == DMU_META_DNODE_OBJECT);
1185 	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1186 		drp = &dr->dr_next;
1187 	if (dr && dr->dr_txg == tx->tx_txg) {
1188 		DB_DNODE_EXIT(db);
1189 
1190 		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1191 			/*
1192 			 * If this buffer has already been written out,
1193 			 * we now need to reset its state.
1194 			 */
1195 			dbuf_unoverride(dr);
1196 			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1197 			    db->db_state != DB_NOFILL)
1198 				arc_buf_thaw(db->db_buf);
1199 		}
1200 		mutex_exit(&db->db_mtx);
1201 		return (dr);
1202 	}
1203 
1204 	/*
1205 	 * Only valid if not already dirty.
1206 	 */
1207 	ASSERT(dn->dn_object == 0 ||
1208 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1209 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1210 
1211 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
1212 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1213 	    dn->dn_phys->dn_nlevels > db->db_level ||
1214 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
1215 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1216 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1217 
1218 	/*
1219 	 * We should only be dirtying in syncing context if it's the
1220 	 * mos or we're initializing the os or it's a special object.
1221 	 * However, we are allowed to dirty in syncing context provided
1222 	 * we already dirtied it in open context.  Hence we must make
1223 	 * this assertion only if we're not already dirty.
1224 	 */
1225 	os = dn->dn_objset;
1226 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1227 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1228 	ASSERT(db->db.db_size != 0);
1229 
1230 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1231 
1232 	if (db->db_blkid != DMU_BONUS_BLKID) {
1233 		/*
1234 		 * Update the accounting.
1235 		 * Note: we delay "free accounting" until after we drop
1236 		 * the db_mtx.  This keeps us from grabbing other locks
1237 		 * (and possibly deadlocking) in bp_get_dsize() while
1238 		 * also holding the db_mtx.
1239 		 */
1240 		dnode_willuse_space(dn, db->db.db_size, tx);
1241 		do_free_accounting = dbuf_block_freeable(db);
1242 	}
1243 
1244 	/*
1245 	 * If this buffer is dirty in an old transaction group we need
1246 	 * to make a copy of it so that the changes we make in this
1247 	 * transaction group won't leak out when we sync the older txg.
1248 	 */
1249 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1250 	if (db->db_level == 0) {
1251 		void *data_old = db->db_buf;
1252 
1253 		if (db->db_state != DB_NOFILL) {
1254 			if (db->db_blkid == DMU_BONUS_BLKID) {
1255 				dbuf_fix_old_data(db, tx->tx_txg);
1256 				data_old = db->db.db_data;
1257 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1258 				/*
1259 				 * Release the data buffer from the cache so
1260 				 * that we can modify it without impacting
1261 				 * possible other users of this cached data
1262 				 * block.  Note that indirect blocks and
1263 				 * private objects are not released until the
1264 				 * syncing state (since they are only modified
1265 				 * then).
1266 				 */
1267 				arc_release(db->db_buf, db);
1268 				dbuf_fix_old_data(db, tx->tx_txg);
1269 				data_old = db->db_buf;
1270 			}
1271 			ASSERT(data_old != NULL);
1272 		}
1273 		dr->dt.dl.dr_data = data_old;
1274 	} else {
1275 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1276 		list_create(&dr->dt.di.dr_children,
1277 		    sizeof (dbuf_dirty_record_t),
1278 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
1279 	}
1280 	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
1281 		dr->dr_accounted = db->db.db_size;
1282 	dr->dr_dbuf = db;
1283 	dr->dr_txg = tx->tx_txg;
1284 	dr->dr_next = *drp;
1285 	*drp = dr;
1286 
1287 	/*
1288 	 * We could have been freed_in_flight between the dbuf_noread
1289 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
1290 	 * happened after the free.
1291 	 */
1292 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1293 	    db->db_blkid != DMU_SPILL_BLKID) {
1294 		mutex_enter(&dn->dn_mtx);
1295 		if (dn->dn_free_ranges[txgoff] != NULL) {
1296 			range_tree_clear(dn->dn_free_ranges[txgoff],
1297 			    db->db_blkid, 1);
1298 		}
1299 		mutex_exit(&dn->dn_mtx);
1300 		db->db_freed_in_flight = FALSE;
1301 	}
1302 
1303 	/*
1304 	 * This buffer is now part of this txg
1305 	 */
1306 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1307 	db->db_dirtycnt += 1;
1308 	ASSERT3U(db->db_dirtycnt, <=, 3);
1309 
1310 	mutex_exit(&db->db_mtx);
1311 
1312 	if (db->db_blkid == DMU_BONUS_BLKID ||
1313 	    db->db_blkid == DMU_SPILL_BLKID) {
1314 		mutex_enter(&dn->dn_mtx);
1315 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1316 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1317 		mutex_exit(&dn->dn_mtx);
1318 		dnode_setdirty(dn, tx);
1319 		DB_DNODE_EXIT(db);
1320 		return (dr);
1321 	} else if (do_free_accounting) {
1322 		blkptr_t *bp = db->db_blkptr;
1323 		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1324 		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1325 		/*
1326 		 * This is only a guess -- if the dbuf is dirty
1327 		 * in a previous txg, we don't know how much
1328 		 * space it will use on disk yet.  We should
1329 		 * really have the struct_rwlock to access
1330 		 * db_blkptr, but since this is just a guess,
1331 		 * it's OK if we get an odd answer.
1332 		 */
1333 		ddt_prefetch(os->os_spa, bp);
1334 		dnode_willuse_space(dn, -willfree, tx);
1335 	}
1336 
1337 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1338 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
1339 		drop_struct_lock = TRUE;
1340 	}
1341 
1342 	if (db->db_level == 0) {
1343 		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1344 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
1345 	}
1346 
1347 	if (db->db_level+1 < dn->dn_nlevels) {
1348 		dmu_buf_impl_t *parent = db->db_parent;
1349 		dbuf_dirty_record_t *di;
1350 		int parent_held = FALSE;
1351 
1352 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1353 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1354 
1355 			parent = dbuf_hold_level(dn, db->db_level+1,
1356 			    db->db_blkid >> epbs, FTAG);
1357 			ASSERT(parent != NULL);
1358 			parent_held = TRUE;
1359 		}
1360 		if (drop_struct_lock)
1361 			rw_exit(&dn->dn_struct_rwlock);
1362 		ASSERT3U(db->db_level+1, ==, parent->db_level);
1363 		di = dbuf_dirty(parent, tx);
1364 		if (parent_held)
1365 			dbuf_rele(parent, FTAG);
1366 
1367 		mutex_enter(&db->db_mtx);
1368 		/*
1369 		 * Since we've dropped the mutex, it's possible that
1370 		 * dbuf_undirty() might have changed this out from under us.
1371 		 */
1372 		if (db->db_last_dirty == dr ||
1373 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
1374 			mutex_enter(&di->dt.di.dr_mtx);
1375 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1376 			ASSERT(!list_link_active(&dr->dr_dirty_node));
1377 			list_insert_tail(&di->dt.di.dr_children, dr);
1378 			mutex_exit(&di->dt.di.dr_mtx);
1379 			dr->dr_parent = di;
1380 		}
1381 		mutex_exit(&db->db_mtx);
1382 	} else {
1383 		ASSERT(db->db_level+1 == dn->dn_nlevels);
1384 		ASSERT(db->db_blkid < dn->dn_nblkptr);
1385 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1386 		mutex_enter(&dn->dn_mtx);
1387 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1388 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1389 		mutex_exit(&dn->dn_mtx);
1390 		if (drop_struct_lock)
1391 			rw_exit(&dn->dn_struct_rwlock);
1392 	}
1393 
1394 	dnode_setdirty(dn, tx);
1395 	DB_DNODE_EXIT(db);
1396 	return (dr);
1397 }
1398 
1399 /*
1400  * Undirty a buffer in the transaction group referenced by the given
1401  * transaction.  Return whether this evicted the dbuf.
1402  */
1403 static boolean_t
1404 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1405 {
1406 	dnode_t *dn;
1407 	uint64_t txg = tx->tx_txg;
1408 	dbuf_dirty_record_t *dr, **drp;
1409 
1410 	ASSERT(txg != 0);
1411 
1412 	/*
1413 	 * Due to our use of dn_nlevels below, this can only be called
1414 	 * in open context, unless we are operating on the MOS.
1415 	 * From syncing context, dn_nlevels may be different from the
1416 	 * dn_nlevels used when dbuf was dirtied.
1417 	 */
1418 	ASSERT(db->db_objset ==
1419 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
1420 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
1421 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1422 	ASSERT0(db->db_level);
1423 	ASSERT(MUTEX_HELD(&db->db_mtx));
1424 
1425 	/*
1426 	 * If this buffer is not dirty, we're done.
1427 	 */
1428 	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1429 		if (dr->dr_txg <= txg)
1430 			break;
1431 	if (dr == NULL || dr->dr_txg < txg)
1432 		return (B_FALSE);
1433 	ASSERT(dr->dr_txg == txg);
1434 	ASSERT(dr->dr_dbuf == db);
1435 
1436 	DB_DNODE_ENTER(db);
1437 	dn = DB_DNODE(db);
1438 
1439 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1440 
1441 	ASSERT(db->db.db_size != 0);
1442 
1443 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
1444 	    dr->dr_accounted, txg);
1445 
1446 	*drp = dr->dr_next;
1447 
1448 	/*
1449 	 * Note that there are three places in dbuf_dirty()
1450 	 * where this dirty record may be put on a list.
1451 	 * Make sure to do a list_remove corresponding to
1452 	 * every one of those list_insert calls.
1453 	 */
1454 	if (dr->dr_parent) {
1455 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1456 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1457 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1458 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
1459 	    db->db_level + 1 == dn->dn_nlevels) {
1460 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1461 		mutex_enter(&dn->dn_mtx);
1462 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1463 		mutex_exit(&dn->dn_mtx);
1464 	}
1465 	DB_DNODE_EXIT(db);
1466 
1467 	if (db->db_state != DB_NOFILL) {
1468 		dbuf_unoverride(dr);
1469 
1470 		ASSERT(db->db_buf != NULL);
1471 		ASSERT(dr->dt.dl.dr_data != NULL);
1472 		if (dr->dt.dl.dr_data != db->db_buf)
1473 			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
1474 	}
1475 
1476 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
1477 
1478 	ASSERT(db->db_dirtycnt > 0);
1479 	db->db_dirtycnt -= 1;
1480 
1481 	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1482 		arc_buf_t *buf = db->db_buf;
1483 
1484 		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1485 		dbuf_clear_data(db);
1486 		VERIFY(arc_buf_remove_ref(buf, db));
1487 		dbuf_evict(db);
1488 		return (B_TRUE);
1489 	}
1490 
1491 	return (B_FALSE);
1492 }
1493 
1494 void
1495 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
1496 {
1497 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1498 	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1499 
1500 	ASSERT(tx->tx_txg != 0);
1501 	ASSERT(!refcount_is_zero(&db->db_holds));
1502 
1503 	DB_DNODE_ENTER(db);
1504 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1505 		rf |= DB_RF_HAVESTRUCT;
1506 	DB_DNODE_EXIT(db);
1507 	(void) dbuf_read(db, NULL, rf);
1508 	(void) dbuf_dirty(db, tx);
1509 }
1510 
1511 void
1512 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1513 {
1514 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1515 
1516 	db->db_state = DB_NOFILL;
1517 
1518 	dmu_buf_will_fill(db_fake, tx);
1519 }
1520 
1521 void
1522 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1523 {
1524 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1525 
1526 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1527 	ASSERT(tx->tx_txg != 0);
1528 	ASSERT(db->db_level == 0);
1529 	ASSERT(!refcount_is_zero(&db->db_holds));
1530 
1531 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1532 	    dmu_tx_private_ok(tx));
1533 
1534 	dbuf_noread(db);
1535 	(void) dbuf_dirty(db, tx);
1536 }
1537 
1538 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1539 /* ARGSUSED */
1540 void
1541 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1542 {
1543 	mutex_enter(&db->db_mtx);
1544 	DBUF_VERIFY(db);
1545 
1546 	if (db->db_state == DB_FILL) {
1547 		if (db->db_level == 0 && db->db_freed_in_flight) {
1548 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1549 			/* we were freed while filling */
1550 			/* XXX dbuf_undirty? */
1551 			bzero(db->db.db_data, db->db.db_size);
1552 			db->db_freed_in_flight = FALSE;
1553 		}
1554 		db->db_state = DB_CACHED;
1555 		cv_broadcast(&db->db_changed);
1556 	}
1557 	mutex_exit(&db->db_mtx);
1558 }
1559 
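/*
 * Overwrite this dbuf's dirty record with a block pointer that embeds the
 * provided data directly (see BP_EMBEDDED_TYPE_*), bypassing normal block
 * allocation.  BP_EMBEDDED_TYPE_DATA requires the embedded_data feature to
 * be active.
 */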
1560 void
1561 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
1562     bp_embedded_type_t etype, enum zio_compress comp,
1563     int uncompressed_size, int compressed_size, int byteorder,
1564     dmu_tx_t *tx)
1565 {
1566 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
1567 	struct dirty_leaf *dl;
1568 	dmu_object_type_t type;
1569 
1570 	if (etype == BP_EMBEDDED_TYPE_DATA) {
1571 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
1572 		    SPA_FEATURE_EMBEDDED_DATA));
1573 	}
1574 
1575 	DB_DNODE_ENTER(db);
1576 	type = DB_DNODE(db)->dn_type;
1577 	DB_DNODE_EXIT(db);
1578 
1579 	ASSERT0(db->db_level);
1580 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1581 
1582 	dmu_buf_will_not_fill(dbuf, tx);
1583 
1584 	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1585 	dl = &db->db_last_dirty->dt.dl;
1586 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
1587 	    data, comp, uncompressed_size, compressed_size);
1588 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
1589 	BP_SET_TYPE(&dl->dr_overridden_by, type);
1590 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
1591 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
1592 
1593 	dl->dr_override_state = DR_OVERRIDDEN;
1594 	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
1595 }
1596 
1597 /*
1598  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1599  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1600  */
1601 void
1602 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1603 {
1604 	ASSERT(!refcount_is_zero(&db->db_holds));
1605 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1606 	ASSERT(db->db_level == 0);
1607 	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1608 	ASSERT(buf != NULL);
1609 	ASSERT(arc_buf_size(buf) == db->db.db_size);
1610 	ASSERT(tx->tx_txg != 0);
1611 
1612 	arc_return_buf(buf, db);
1613 	ASSERT(arc_released(buf));
1614 
1615 	mutex_enter(&db->db_mtx);
1616 
1617 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1618 		cv_wait(&db->db_changed, &db->db_mtx);
1619 
1620 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1621 
1622 	if (db->db_state == DB_CACHED &&
1623 	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1624 		mutex_exit(&db->db_mtx);
1625 		(void) dbuf_dirty(db, tx);
1626 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1627 		VERIFY(arc_buf_remove_ref(buf, db));
1628 		xuio_stat_wbuf_copied();
1629 		return;
1630 	}
1631 
1632 	xuio_stat_wbuf_nocopy();
1633 	if (db->db_state == DB_CACHED) {
1634 		dbuf_dirty_record_t *dr = db->db_last_dirty;
1635 
1636 		ASSERT(db->db_buf != NULL);
1637 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1638 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
1639 			if (!arc_released(db->db_buf)) {
1640 				ASSERT(dr->dt.dl.dr_override_state ==
1641 				    DR_OVERRIDDEN);
1642 				arc_release(db->db_buf, db);
1643 			}
1644 			dr->dt.dl.dr_data = buf;
1645 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1646 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1647 			arc_release(db->db_buf, db);
1648 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1649 		}
1650 		db->db_buf = NULL;
1651 	}
1652 	ASSERT(db->db_buf == NULL);
1653 	dbuf_set_data(db, buf);
1654 	db->db_state = DB_FILL;
1655 	mutex_exit(&db->db_mtx);
1656 	(void) dbuf_dirty(db, tx);
1657 	dmu_buf_fill_done(&db->db, tx);
1658 }
1659 
1660 /*
1661  * "Clear" the contents of this dbuf.  This will mark the dbuf
1662  * EVICTING and clear *most* of its references.  Unfortunately,
1663  * when we are not holding the dn_dbufs_mtx, we can't clear the
1664  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1665  * in this case.  For callers from the DMU we will usually see:
1666  *	dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
1667  * For the arc callback, we will usually see:
1668  *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1669  * Sometimes, though, we will get a mix of these two:
1670  *	DMU: dbuf_clear()->arc_clear_callback()
1671  *	ARC: dbuf_do_evict()->dbuf_destroy()
1672  *
1673  * This routine will dissociate the dbuf from the arc, by calling
1674  * arc_clear_callback(), but will not evict the data from the ARC.
1675  */
1676 void
1677 dbuf_clear(dmu_buf_impl_t *db)
1678 {
1679 	dnode_t *dn;
1680 	dmu_buf_impl_t *parent = db->db_parent;
1681 	dmu_buf_impl_t *dndb;
1682 	boolean_t dbuf_gone = B_FALSE;
1683 
1684 	ASSERT(MUTEX_HELD(&db->db_mtx));
1685 	ASSERT(refcount_is_zero(&db->db_holds));
1686 
1687 	dbuf_evict_user(db);
1688 
1689 	if (db->db_state == DB_CACHED) {
1690 		ASSERT(db->db.db_data != NULL);
1691 		if (db->db_blkid == DMU_BONUS_BLKID) {
1692 			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1693 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1694 		}
1695 		db->db.db_data = NULL;
1696 		db->db_state = DB_UNCACHED;
1697 	}
1698 
1699 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1700 	ASSERT(db->db_data_pending == NULL);
1701 
1702 	db->db_state = DB_EVICTING;
1703 	db->db_blkptr = NULL;
1704 
1705 	DB_DNODE_ENTER(db);
1706 	dn = DB_DNODE(db);
1707 	dndb = dn->dn_dbuf;
1708 	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1709 		avl_remove(&dn->dn_dbufs, db);
1710 		atomic_dec_32(&dn->dn_dbufs_count);
1711 		membar_producer();
1712 		DB_DNODE_EXIT(db);
1713 		/*
1714 		 * Decrementing the dbuf count means that the hold corresponding
1715 		 * to the removed dbuf is no longer discounted in dnode_move(),
1716 		 * so the dnode cannot be moved until after we release the hold.
1717 		 * The membar_producer() ensures visibility of the decremented
1718 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1719 		 * release any lock.
1720 		 */
1721 		dnode_rele(dn, db);
1722 		db->db_dnode_handle = NULL;
1723 	} else {
1724 		DB_DNODE_EXIT(db);
1725 	}
1726 
1727 	if (db->db_buf)
1728 		dbuf_gone = arc_clear_callback(db->db_buf);
1729 
1730 	if (!dbuf_gone)
1731 		mutex_exit(&db->db_mtx);
1732 
1733 	/*
1734 	 * If this dbuf is referenced from an indirect dbuf,
1735 	 * decrement the ref count on the indirect dbuf.
1736 	 */
1737 	if (parent && parent != dndb)
1738 		dbuf_rele(parent, db);
1739 }
1740 
1741 /*
1742  * Note: While bpp will always be updated if the function returns success,
1743  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
1744  * this happens when the dnode is the meta-dnode, or a userused or groupused
1745  * object.
1746  */
1747 static int
1748 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1749     dmu_buf_impl_t **parentp, blkptr_t **bpp)
1750 {
1751 	int nlevels, epbs;
1752 
1753 	*parentp = NULL;
1754 	*bpp = NULL;
1755 
1756 	ASSERT(blkid != DMU_BONUS_BLKID);
1757 
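	/*
	 * The spill block pointer is embedded in the dnode itself
	 * (dn_phys->dn_spill) and protected by dn_mtx, so the parent
	 * is always the dnode's own dbuf.
	 */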
1758 	if (blkid == DMU_SPILL_BLKID) {
1759 		mutex_enter(&dn->dn_mtx);
1760 		if (dn->dn_have_spill &&
1761 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1762 			*bpp = &dn->dn_phys->dn_spill;
1763 		else
1764 			*bpp = NULL;
1765 		dbuf_add_ref(dn->dn_dbuf, NULL);
1766 		*parentp = dn->dn_dbuf;
1767 		mutex_exit(&dn->dn_mtx);
1768 		return (0);
1769 	}
1770 
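	/*
	 * If the dnode's on-disk version has not been filled in yet,
	 * dn_nlevels can still be zero; treat that as a single level.
	 */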
1771 	if (dn->dn_phys->dn_nlevels == 0)
1772 		nlevels = 1;
1773 	else
1774 		nlevels = dn->dn_phys->dn_nlevels;
1775 
1776 	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1777 
1778 	ASSERT3U(level * epbs, <, 64);
1779 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1780 	if (level >= nlevels ||
1781 	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1782 		/* the buffer has no parent yet */
1783 		return (SET_ERROR(ENOENT));
1784 	} else if (level < nlevels-1) {
1785 		/* this block is referenced from an indirect block */
1786 		int err = dbuf_hold_impl(dn, level+1,
1787 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
1788 		if (err)
1789 			return (err);
1790 		err = dbuf_read(*parentp, NULL,
1791 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1792 		if (err) {
1793 			dbuf_rele(*parentp, NULL);
1794 			*parentp = NULL;
1795 			return (err);
1796 		}
1797 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1798 		    (blkid & ((1ULL << epbs) - 1));
1799 		return (0);
1800 	} else {
1801 		/* the block is referenced from the dnode */
1802 		ASSERT3U(level, ==, nlevels-1);
1803 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1804 		    blkid < dn->dn_phys->dn_nblkptr);
1805 		if (dn->dn_dbuf) {
1806 			dbuf_add_ref(dn->dn_dbuf, NULL);
1807 			*parentp = dn->dn_dbuf;
1808 		}
1809 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
1810 		return (0);
1811 	}
1812 }
1813 
1814 static dmu_buf_impl_t *
1815 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1816     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1817 {
1818 	objset_t *os = dn->dn_objset;
1819 	dmu_buf_impl_t *db, *odb;
1820 
1821 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1822 	ASSERT(dn->dn_type != DMU_OT_NONE);
1823 
1824 	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1825 
1826 	db->db_objset = os;
1827 	db->db.db_object = dn->dn_object;
1828 	db->db_level = level;
1829 	db->db_blkid = blkid;
1830 	db->db_last_dirty = NULL;
1831 	db->db_dirtycnt = 0;
1832 	db->db_dnode_handle = dn->dn_handle;
1833 	db->db_parent = parent;
1834 	db->db_blkptr = blkptr;
1835 
1836 	db->db_user = NULL;
1837 	db->db_immediate_evict = 0;
1838 	db->db_freed_in_flight = 0;
1839 
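	/* Determine the dbuf's size and offset from its type and level. */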
1840 	if (blkid == DMU_BONUS_BLKID) {
1841 		ASSERT3P(parent, ==, dn->dn_dbuf);
1842 		db->db.db_size = DN_MAX_BONUSLEN -
1843 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1844 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1845 		db->db.db_offset = DMU_BONUS_BLKID;
1846 		db->db_state = DB_UNCACHED;
1847 		/* the bonus dbuf is not placed in the hash table */
1848 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1849 		return (db);
1850 	} else if (blkid == DMU_SPILL_BLKID) {
1851 		db->db.db_size = (blkptr != NULL) ?
1852 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1853 		db->db.db_offset = 0;
1854 	} else {
1855 		int blocksize =
1856 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
1857 		db->db.db_size = blocksize;
1858 		db->db.db_offset = db->db_blkid * blocksize;
1859 	}
1860 
1861 	/*
1862 	 * Hold the dn_dbufs_mtx while we insert the new dbuf
1863 	 * into the hash table *and* add it to the dbufs list.
1864 	 * This prevents a possible deadlock with someone
1865 	 * trying to look up this dbuf before it's added to the
1866 	 * dn_dbufs list.
1867 	 */
1868 	mutex_enter(&dn->dn_dbufs_mtx);
1869 	db->db_state = DB_EVICTING;
1870 	if ((odb = dbuf_hash_insert(db)) != NULL) {
1871 		/* someone else inserted it first */
1872 		kmem_cache_free(dbuf_cache, db);
1873 		mutex_exit(&dn->dn_dbufs_mtx);
1874 		return (odb);
1875 	}
1876 	avl_add(&dn->dn_dbufs, db);
1877 	if (db->db_level == 0 && db->db_blkid >=
1878 	    dn->dn_unlisted_l0_blkid)
1879 		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
1880 	db->db_state = DB_UNCACHED;
1881 	mutex_exit(&dn->dn_dbufs_mtx);
1882 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1883 
1884 	if (parent && parent != dn->dn_dbuf)
1885 		dbuf_add_ref(parent, db);
1886 
1887 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1888 	    refcount_count(&dn->dn_holds) > 0);
1889 	(void) refcount_add(&dn->dn_holds, db);
1890 	atomic_inc_32(&dn->dn_dbufs_count);
1891 
1892 	dprintf_dbuf(db, "db=%p\n", db);
1893 
1894 	return (db);
1895 }
1896 
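/*
 * Evict callback, invoked on behalf of the ARC (via
 * arc_clear_callback()).  If dbuf_clear() has already marked the dbuf
 * EVICTING, all that remains is to destroy it; otherwise dbuf_evict()
 * clears and destroys it here.
 */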
1897 static int
1898 dbuf_do_evict(void *private)
1899 {
1900 	dmu_buf_impl_t *db = private;
1901 
1902 	if (!MUTEX_HELD(&db->db_mtx))
1903 		mutex_enter(&db->db_mtx);
1904 
1905 	ASSERT(refcount_is_zero(&db->db_holds));
1906 
1907 	if (db->db_state != DB_EVICTING) {
1908 		ASSERT(db->db_state == DB_CACHED);
1909 		DBUF_VERIFY(db);
1910 		db->db_buf = NULL;
1911 		dbuf_evict(db);
1912 	} else {
1913 		mutex_exit(&db->db_mtx);
1914 		dbuf_destroy(db);
1915 	}
1916 	return (0);
1917 }
1918 
1919 static void
1920 dbuf_destroy(dmu_buf_impl_t *db)
1921 {
1922 	ASSERT(refcount_is_zero(&db->db_holds));
1923 
1924 	if (db->db_blkid != DMU_BONUS_BLKID) {
1925 		/*
1926 		 * If this dbuf is still on the dn_dbufs list,
1927 		 * remove it from that list.
1928 		 */
1929 		if (db->db_dnode_handle != NULL) {
1930 			dnode_t *dn;
1931 
1932 			DB_DNODE_ENTER(db);
1933 			dn = DB_DNODE(db);
1934 			mutex_enter(&dn->dn_dbufs_mtx);
1935 			avl_remove(&dn->dn_dbufs, db);
1936 			atomic_dec_32(&dn->dn_dbufs_count);
1937 			mutex_exit(&dn->dn_dbufs_mtx);
1938 			DB_DNODE_EXIT(db);
1939 			/*
1940 			 * Decrementing the dbuf count means that the hold
1941 			 * corresponding to the removed dbuf is no longer
1942 			 * discounted in dnode_move(), so the dnode cannot be
1943 			 * moved until after we release the hold.
1944 			 */
1945 			dnode_rele(dn, db);
1946 			db->db_dnode_handle = NULL;
1947 		}
1948 		dbuf_hash_remove(db);
1949 	}
1950 	db->db_parent = NULL;
1951 	db->db_buf = NULL;
1952 
1953 	ASSERT(db->db.db_data == NULL);
1954 	ASSERT(db->db_hash_next == NULL);
1955 	ASSERT(db->db_blkptr == NULL);
1956 	ASSERT(db->db_data_pending == NULL);
1957 
1958 	kmem_cache_free(dbuf_cache, db);
1959 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1960 }
1961 
1962 typedef struct dbuf_prefetch_arg {
1963 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
1964 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
1965 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
1966 	int dpa_curlevel; /* The current level that we're reading */
1967 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
1968 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
1969 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
1970 } dbuf_prefetch_arg_t;
1971 
1972 /*
1973  * Actually issue the prefetch read for the block given.
1974  */
1975 static void
1976 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
1977 {
1978 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1979 		return;
1980 
1981 	arc_flags_t aflags =
1982 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
1983 
1984 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
1985 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
1986 	ASSERT(dpa->dpa_zio != NULL);
1987 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
1988 	    dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1989 	    &aflags, &dpa->dpa_zb);
1990 }
1991 
1992 /*
1993  * Called when an indirect block above our prefetch target is read in.  This
1994  * will either read in the next indirect block down the tree or issue the actual
1995  * prefetch if the next block down is our target.
1996  */
1997 static void
1998 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
1999 {
2000 	dbuf_prefetch_arg_t *dpa = private;
2001 
2002 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2003 	ASSERT3S(dpa->dpa_curlevel, >, 0);
2004 	if (zio != NULL) {
2005 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
2006 		ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2007 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2008 	}
2009 
2010 	dpa->dpa_curlevel--;
2011 
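	/*
	 * Locate the blkptr for the next level down: convert the
	 * target blkid to this level and index into the indirect
	 * block that was just read.
	 */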
2012 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
2013 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
2014 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
2015 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2016 	if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
2017 		kmem_free(dpa, sizeof (*dpa));
2018 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2019 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2020 		dbuf_issue_final_prefetch(dpa, bp);
2021 		kmem_free(dpa, sizeof (*dpa));
2022 	} else {
2023 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2024 		zbookmark_phys_t zb;
2025 
2026 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2027 
2028 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2029 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2030 
2031 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2032 		    bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2033 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2034 		    &iter_aflags, &zb);
2035 	}
2036 	(void) arc_buf_remove_ref(abuf, private);
2037 }
2038 
2039 /*
2040  * Issue prefetch reads for the given block on the given level.  If the indirect
2041  * blocks above that block are not in memory, we will read them in
2042  * asynchronously.  As a result, this call never blocks waiting for a read to
2043  * complete.
2044  */
2045 void
2046 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2047     arc_flags_t aflags)
2048 {
2049 	blkptr_t bp;
2050 	int epbs, nlevels, curlevel;
2051 	uint64_t curblkid;
2052 
2053 	ASSERT(blkid != DMU_BONUS_BLKID);
2054 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2055 
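	/* There is nothing to prefetch beyond EOF or in a freed range. */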
2056 	if (blkid > dn->dn_maxblkid)
2057 		return;
2058 
2059 	if (dnode_block_freed(dn, blkid))
2060 		return;
2061 
2062 	/*
2063 	 * This dnode hasn't been written to disk yet, so there's nothing to
2064 	 * prefetch.
2065 	 */
2066 	nlevels = dn->dn_phys->dn_nlevels;
2067 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2068 		return;
2069 
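	/*
	 * Verify that the block at this level actually maps to a block
	 * within the object; dn_maxblkid is in units of level-0 blocks.
	 */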
2070 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2071 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2072 		return;
2073 
2074 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2075 	    level, blkid);
2076 	if (db != NULL) {
2077 		mutex_exit(&db->db_mtx);
2078 		/*
2079 		 * This dbuf already exists.  It is either CACHED, or
2080 		 * (we assume) about to be read or filled.
2081 		 */
2082 		return;
2083 	}
2084 
2085 	/*
2086 	 * Find the closest ancestor (indirect block) of the target block
2087 	 * that is present in the cache.  In this indirect block, we will
2088 	 * find the bp that is at curlevel, curblkid.
2089 	 */
2090 	curlevel = level;
2091 	curblkid = blkid;
2092 	while (curlevel < nlevels - 1) {
2093 		int parent_level = curlevel + 1;
2094 		uint64_t parent_blkid = curblkid >> epbs;
2095 		dmu_buf_impl_t *db;
2096 
2097 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
2098 		    FALSE, TRUE, FTAG, &db) == 0) {
2099 			blkptr_t *bpp = db->db_buf->b_data;
2100 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
2101 			dbuf_rele(db, FTAG);
2102 			break;
2103 		}
2104 
2105 		curlevel = parent_level;
2106 		curblkid = parent_blkid;
2107 	}
2108 
2109 	if (curlevel == nlevels - 1) {
2110 		/* No cached indirect blocks found. */
2111 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
2112 		bp = dn->dn_phys->dn_blkptr[curblkid];
2113 	}
2114 	if (BP_IS_HOLE(&bp))
2115 		return;
2116 
2117 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
2118 
2119 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
2120 	    ZIO_FLAG_CANFAIL);
2121 
2122 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
2123 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2124 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2125 	    dn->dn_object, level, blkid);
2126 	dpa->dpa_curlevel = curlevel;
2127 	dpa->dpa_prio = prio;
2128 	dpa->dpa_aflags = aflags;
2129 	dpa->dpa_spa = dn->dn_objset->os_spa;
2130 	dpa->dpa_epbs = epbs;
2131 	dpa->dpa_zio = pio;
2132 
2133 	/*
2134 	 * If we have the indirect just above us, no need to do the asynchronous
2135 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
2136 	 * a higher level, though, we want to issue the prefetches for all the
2137 	 * indirect blocks asynchronously, so we can go on with whatever we were
2138 	 * doing.
2139 	 */
2140 	if (curlevel == level) {
2141 		ASSERT3U(curblkid, ==, blkid);
2142 		dbuf_issue_final_prefetch(dpa, &bp);
2143 		kmem_free(dpa, sizeof (*dpa));
2144 	} else {
2145 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2146 		zbookmark_phys_t zb;
2147 
2148 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2149 		    dn->dn_object, curlevel, curblkid);
2150 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2151 		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
2152 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2153 		    &iter_aflags, &zb);
2154 	}
2155 	/*
2156 	 * We use pio here instead of dpa_zio since it's possible that
2157 	 * dpa may have already been freed.
2158 	 */
2159 	zio_nowait(pio);
2160 }
2161 
2162 /*
2163  * Returns with db_holds incremented, and db_mtx not held.
2164  * Note: dn_struct_rwlock must be held.
2165  */
2166 int
2167 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
2168     boolean_t fail_sparse, boolean_t fail_uncached,
2169     void *tag, dmu_buf_impl_t **dbp)
2170 {
2171 	dmu_buf_impl_t *db, *parent = NULL;
2172 
2173 	ASSERT(blkid != DMU_BONUS_BLKID);
2174 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2175 	ASSERT3U(dn->dn_nlevels, >, level);
2176 
2177 	*dbp = NULL;
2178 top:
2179 	/* dbuf_find() returns with db_mtx held */
2180 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2181 
2182 	if (db == NULL) {
2183 		blkptr_t *bp = NULL;
2184 		int err;
2185 
2186 		if (fail_uncached)
2187 			return (SET_ERROR(ENOENT));
2188 
2189 		ASSERT3P(parent, ==, NULL);
2190 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2191 		if (fail_sparse) {
2192 			if (err == 0 && bp && BP_IS_HOLE(bp))
2193 				err = SET_ERROR(ENOENT);
2194 			if (err) {
2195 				if (parent)
2196 					dbuf_rele(parent, NULL);
2197 				return (err);
2198 			}
2199 		}
2200 		if (err && err != ENOENT)
2201 			return (err);
2202 		db = dbuf_create(dn, level, blkid, parent, bp);
2203 	}
2204 
2205 	if (fail_uncached && db->db_state != DB_CACHED) {
2206 		mutex_exit(&db->db_mtx);
2207 		return (SET_ERROR(ENOENT));
2208 	}
2209 
2210 	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
2211 		arc_buf_add_ref(db->db_buf, db);
2212 		if (db->db_buf->b_data == NULL) {
2213 			dbuf_clear(db);
2214 			if (parent) {
2215 				dbuf_rele(parent, NULL);
2216 				parent = NULL;
2217 			}
2218 			goto top;
2219 		}
2220 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2221 	}
2222 
2223 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2224 
2225 	/*
2226 	 * If this buffer is currently syncing out, and we are
2227 	 * still referencing it from db_data, we need to make a copy
2228 	 * of it in case we decide we want to dirty it again in this txg.
2229 	 */
2230 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2231 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2232 	    db->db_state == DB_CACHED && db->db_data_pending) {
2233 		dbuf_dirty_record_t *dr = db->db_data_pending;
2234 
2235 		if (dr->dt.dl.dr_data == db->db_buf) {
2236 			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2237 
2238 			dbuf_set_data(db,
2239 			    arc_buf_alloc(dn->dn_objset->os_spa,
2240 			    db->db.db_size, db, type));
2241 			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
2242 			    db->db.db_size);
2243 		}
2244 	}
2245 
2246 	(void) refcount_add(&db->db_holds, tag);
2247 	DBUF_VERIFY(db);
2248 	mutex_exit(&db->db_mtx);
2249 
2250 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
2251 	if (parent)
2252 		dbuf_rele(parent, NULL);
2253 
2254 	ASSERT3P(DB_DNODE(db), ==, dn);
2255 	ASSERT3U(db->db_blkid, ==, blkid);
2256 	ASSERT3U(db->db_level, ==, level);
2257 	*dbp = db;
2258 
2259 	return (0);
2260 }
2261 
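/*
 * Hold the level-0 dbuf for the given block.  A sketch of typical use
 * (illustrative only, not taken from this file):
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		...
 *		dbuf_rele(db, FTAG);
 *	}
 */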
2262 dmu_buf_impl_t *
2263 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2264 {
2265 	return (dbuf_hold_level(dn, 0, blkid, tag));
2266 }
2267 
2268 dmu_buf_impl_t *
2269 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2270 {
2271 	dmu_buf_impl_t *db;
2272 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
2273 	return (err ? NULL : db);
2274 }
2275 
2276 void
2277 dbuf_create_bonus(dnode_t *dn)
2278 {
2279 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2280 
2281 	ASSERT(dn->dn_bonus == NULL);
2282 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2283 }
2284 
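/*
 * Resize a dbuf's spill block.  A size of zero means the minimum
 * (SPA_MINBLOCKSIZE); other sizes are rounded up to a multiple of
 * SPA_MINBLOCKSIZE.  Non-spill dbufs are rejected with ENOTSUP.
 */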
2285 int
2286 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2287 {
2288 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2289 	dnode_t *dn;
2290 
2291 	if (db->db_blkid != DMU_SPILL_BLKID)
2292 		return (SET_ERROR(ENOTSUP));
2293 	if (blksz == 0)
2294 		blksz = SPA_MINBLOCKSIZE;
2295 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
2296 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2297 
2298 	DB_DNODE_ENTER(db);
2299 	dn = DB_DNODE(db);
2300 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2301 	dbuf_new_size(db, blksz, tx);
2302 	rw_exit(&dn->dn_struct_rwlock);
2303 	DB_DNODE_EXIT(db);
2304 
2305 	return (0);
2306 }
2307 
2308 void
2309 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2310 {
2311 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2312 }
2313 
2314 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2315 void
2316 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2317 {
2318 	int64_t holds = refcount_add(&db->db_holds, tag);
2319 	ASSERT(holds > 1);
2320 }
2321 
2322 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
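/*
 * Add a hold to an arbitrary dbuf, but only if it is still the dbuf
 * cached for (os, obj, blkid) and it has at least one hold beyond its
 * dirty records.  Returns B_TRUE if the hold was added.
 */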
2323 boolean_t
2324 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
2325     void *tag)
2326 {
2327 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2328 	dmu_buf_impl_t *found_db;
2329 	boolean_t result = B_FALSE;
2330 
2331 	if (db->db_blkid == DMU_BONUS_BLKID)
2332 		found_db = dbuf_find_bonus(os, obj);
2333 	else
2334 		found_db = dbuf_find(os, obj, 0, blkid);
2335 
2336 	if (found_db != NULL) {
2337 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
2338 			(void) refcount_add(&db->db_holds, tag);
2339 			result = B_TRUE;
2340 		}
2341 		mutex_exit(&found_db->db_mtx);
2342 	}
2343 	return (result);
2344 }
2345 
2346 /*
2347  * If you call dbuf_rele() you had better not be referencing the dnode handle
2348  * unless you have some other direct or indirect hold on the dnode. (An indirect
2349  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2350  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2351  * dnode's parent dbuf evicting its dnode handles.
2352  */
2353 void
2354 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2355 {
2356 	mutex_enter(&db->db_mtx);
2357 	dbuf_rele_and_unlock(db, tag);
2358 }
2359 
2360 void
2361 dmu_buf_rele(dmu_buf_t *db, void *tag)
2362 {
2363 	dbuf_rele((dmu_buf_impl_t *)db, tag);
2364 }
2365 
2366 /*
2367  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2368  * db_dirtycnt and db_holds to be updated atomically.
2369  */
2370 void
2371 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2372 {
2373 	int64_t holds;
2374 
2375 	ASSERT(MUTEX_HELD(&db->db_mtx));
2376 	DBUF_VERIFY(db);
2377 
2378 	/*
2379 	 * Remove the reference to the dbuf before removing its hold on the
2380 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2381 	 * buffer has a corresponding dnode hold.
2382 	 */
2383 	holds = refcount_remove(&db->db_holds, tag);
2384 	ASSERT(holds >= 0);
2385 
2386 	/*
2387 	 * We can't freeze indirects if there is a possibility that they
2388 	 * may be modified in the current syncing context.
2389 	 */
2390 	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2391 		arc_buf_freeze(db->db_buf);
2392 
2393 	if (holds == db->db_dirtycnt &&
2394 	    db->db_level == 0 && db->db_immediate_evict)
2395 		dbuf_evict_user(db);
2396 
2397 	if (holds == 0) {
2398 		if (db->db_blkid == DMU_BONUS_BLKID) {
2399 			dnode_t *dn;
2400 
2401 			/*
2402 			 * If the dnode moves here, we cannot cross this
2403 			 * barrier until the move completes.
2404 			 */
2405 			DB_DNODE_ENTER(db);
2406 
2407 			dn = DB_DNODE(db);
2408 			atomic_dec_32(&dn->dn_dbufs_count);
2409 
2410 			/*
2411 			 * Decrementing the dbuf count means that the bonus
2412 			 * buffer's dnode hold is no longer discounted in
2413 			 * dnode_move(). The dnode cannot move until after
2414 			 * the dnode_rele_and_unlock() below.
2415 			 */
2416 			DB_DNODE_EXIT(db);
2417 
2418 			/*
2419 			 * Do not reference db after its lock is dropped.
2420 			 * Another thread may evict it.
2421 			 */
2422 			mutex_exit(&db->db_mtx);
2423 
2424 			/*
2425 			 * If the dnode has been freed, evict the bonus
2426 			 * buffer immediately.	The data in the bonus
2427 			 * buffer is no longer relevant and this prevents
2428 			 * a stale bonus buffer from being associated
2429 			 * with this dnode_t should the dnode_t be reused
2430 			 * prior to being destroyed.
2431 			 */
2432 			mutex_enter(&dn->dn_mtx);
2433 			if (dn->dn_type == DMU_OT_NONE ||
2434 			    dn->dn_free_txg != 0) {
2435 				/*
2436 				 * Drop dn_mtx.  It is a leaf lock and
2437 				 * cannot be held when dnode_evict_bonus()
2438 				 * acquires other locks in order to
2439 				 * perform the eviction.
2440 				 *
2441 				 * Freed dnodes cannot be reused until the
2442 				 * last hold is released.  Since this bonus
2443 				 * buffer has a hold, the dnode will remain
2444 				 * in the free state, even without dn_mtx
2445 				 * held, until the dnode_rele_and_unlock()
2446 				 * below.
2447 				 */
2448 				mutex_exit(&dn->dn_mtx);
2449 				dnode_evict_bonus(dn);
2450 				mutex_enter(&dn->dn_mtx);
2451 			}
2452 			dnode_rele_and_unlock(dn, db);
2453 		} else if (db->db_buf == NULL) {
2454 			/*
2455 			 * This is a special case: we never associated this
2456 			 * dbuf with any data allocated from the ARC.
2457 			 */
2458 			ASSERT(db->db_state == DB_UNCACHED ||
2459 			    db->db_state == DB_NOFILL);
2460 			dbuf_evict(db);
2461 		} else if (arc_released(db->db_buf)) {
2462 			arc_buf_t *buf = db->db_buf;
2463 			/*
2464 			 * This dbuf has anonymous data associated with it.
2465 			 */
2466 			dbuf_clear_data(db);
2467 			VERIFY(arc_buf_remove_ref(buf, db));
2468 			dbuf_evict(db);
2469 		} else {
2470 			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2471 
2472 			/*
2473 			 * A dbuf will be eligible for eviction if either the
2474 			 * 'primarycache' property is set or a duplicate
2475 			 * copy of this buffer is already cached in the arc.
2476 			 *
2477 			 * In the case of the 'primarycache' property, a
2478 			 * buffer is considered for eviction if it matches
2479 			 * the criteria set by the property.
2480 			 *
2481 			 * To decide if our buffer is considered a
2482 			 * duplicate, we must call into the arc to determine
2483 			 * if multiple buffers are referencing the same
2484 			 * block on-disk. If so, then we simply evict
2485 			 * ourselves.
2486 			 */
2487 			if (!DBUF_IS_CACHEABLE(db)) {
2488 				if (db->db_blkptr != NULL &&
2489 				    !BP_IS_HOLE(db->db_blkptr) &&
2490 				    !BP_IS_EMBEDDED(db->db_blkptr)) {
2491 					spa_t *spa =
2492 					    dmu_objset_spa(db->db_objset);
2493 					blkptr_t bp = *db->db_blkptr;
2494 					dbuf_clear(db);
2495 					arc_freed(spa, &bp);
2496 				} else {
2497 					dbuf_clear(db);
2498 				}
2499 			} else if (db->db_objset->os_evicting ||
2500 			    arc_buf_eviction_needed(db->db_buf)) {
2501 				dbuf_clear(db);
2502 			} else {
2503 				mutex_exit(&db->db_mtx);
2504 			}
2505 		}
2506 	} else {
2507 		mutex_exit(&db->db_mtx);
2508 	}
2509 }
2510 
2511 #pragma weak dmu_buf_refcount = dbuf_refcount
2512 uint64_t
2513 dbuf_refcount(dmu_buf_impl_t *db)
2514 {
2515 	return (refcount_count(&db->db_holds));
2516 }
2517 
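/*
 * Replace the dbuf's user with new_user, but only if the currently
 * attached user is old_user (compare-and-swap semantics).  Returns the
 * user attached on entry, so a return value equal to old_user means
 * the swap happened.  An illustrative (not from this file) caller:
 *
 *	if (dmu_buf_replace_user(db, old, new) != old)
 *		... lost the race; a different user is attached ...
 */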
2518 void *
2519 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
2520     dmu_buf_user_t *new_user)
2521 {
2522 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2523 
2524 	mutex_enter(&db->db_mtx);
2525 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2526 	if (db->db_user == old_user)
2527 		db->db_user = new_user;
2528 	else
2529 		old_user = db->db_user;
2530 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2531 	mutex_exit(&db->db_mtx);
2532 
2533 	return (old_user);
2534 }
2535 
2536 void *
2537 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2538 {
2539 	return (dmu_buf_replace_user(db_fake, NULL, user));
2540 }
2541 
2542 void *
2543 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2544 {
2545 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2546 
2547 	db->db_immediate_evict = TRUE;
2548 	return (dmu_buf_set_user(db_fake, user));
2549 }
2550 
2551 void *
2552 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2553 {
2554 	return (dmu_buf_replace_user(db_fake, user, NULL));
2555 }
2556 
2557 void *
2558 dmu_buf_get_user(dmu_buf_t *db_fake)
2559 {
2560 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2561 
2562 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2563 	return (db->db_user);
2564 }
2565 
2566 void
2567 dmu_buf_user_evict_wait(void)
2568 {
2569 	taskq_wait(dbu_evict_taskq);
2570 }
2571 
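/*
 * Report whether freeing the dbuf's current on-disk block would
 * actually reclaim space, i.e., whether the block was born after the
 * dataset's most recent snapshot.
 */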
2572 boolean_t
2573 dmu_buf_freeable(dmu_buf_t *dbuf)
2574 {
2575 	boolean_t res = B_FALSE;
2576 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2577 
2578 	if (db->db_blkptr)
2579 		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2580 		    db->db_blkptr, db->db_blkptr->blk_birth);
2581 
2582 	return (res);
2583 }
2584 
2585 blkptr_t *
2586 dmu_buf_get_blkptr(dmu_buf_t *db)
2587 {
2588 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2589 	return (dbi->db_blkptr);
2590 }
2591 
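/*
 * If the dbuf does not yet have a block pointer, hook it up to its
 * parent (the dnode itself or an indirect block, which may need to be
 * read in) and point db_blkptr at the appropriate slot.
 */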
2592 static void
2593 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2594 {
2595 	/* ASSERT(dmu_tx_is_syncing(tx)) */
2596 	ASSERT(MUTEX_HELD(&db->db_mtx));
2597 
2598 	if (db->db_blkptr != NULL)
2599 		return;
2600 
2601 	if (db->db_blkid == DMU_SPILL_BLKID) {
2602 		db->db_blkptr = &dn->dn_phys->dn_spill;
2603 		BP_ZERO(db->db_blkptr);
2604 		return;
2605 	}
2606 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2607 		/*
2608 		 * This buffer was allocated at a time when there were
2609 		 * no available blkptrs from the dnode, or it was
2610 		 * inappropriate to hook it in (i.e., nlevels mismatch).
2611 		 */
2612 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2613 		ASSERT(db->db_parent == NULL);
2614 		db->db_parent = dn->dn_dbuf;
2615 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2616 		DBUF_VERIFY(db);
2617 	} else {
2618 		dmu_buf_impl_t *parent = db->db_parent;
2619 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2620 
2621 		ASSERT(dn->dn_phys->dn_nlevels > 1);
2622 		if (parent == NULL) {
2623 			mutex_exit(&db->db_mtx);
2624 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2625 			parent = dbuf_hold_level(dn, db->db_level + 1,
2626 			    db->db_blkid >> epbs, db);
2627 			rw_exit(&dn->dn_struct_rwlock);
2628 			mutex_enter(&db->db_mtx);
2629 			db->db_parent = parent;
2630 		}
2631 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2632 		    (db->db_blkid & ((1ULL << epbs) - 1));
2633 		DBUF_VERIFY(db);
2634 	}
2635 }
2636 
2637 static void
2638 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2639 {
2640 	dmu_buf_impl_t *db = dr->dr_dbuf;
2641 	dnode_t *dn;
2642 	zio_t *zio;
2643 
2644 	ASSERT(dmu_tx_is_syncing(tx));
2645 
2646 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2647 
2648 	mutex_enter(&db->db_mtx);
2649 
2650 	ASSERT(db->db_level > 0);
2651 	DBUF_VERIFY(db);
2652 
2653 	/* Read the block if it hasn't been read yet. */
2654 	if (db->db_buf == NULL) {
2655 		mutex_exit(&db->db_mtx);
2656 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2657 		mutex_enter(&db->db_mtx);
2658 	}
2659 	ASSERT3U(db->db_state, ==, DB_CACHED);
2660 	ASSERT(db->db_buf != NULL);
2661 
2662 	DB_DNODE_ENTER(db);
2663 	dn = DB_DNODE(db);
2664 	/* Indirect block size must match what the dnode thinks it is. */
2665 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2666 	dbuf_check_blkptr(dn, db);
2667 	DB_DNODE_EXIT(db);
2668 
2669 	/* Provide the pending dirty record to child dbufs */
2670 	db->db_data_pending = dr;
2671 
2672 	mutex_exit(&db->db_mtx);
2673 	dbuf_write(dr, db->db_buf, tx);
2674 
2675 	zio = dr->dr_zio;
2676 	mutex_enter(&dr->dt.di.dr_mtx);
2677 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
2678 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2679 	mutex_exit(&dr->dt.di.dr_mtx);
2680 	zio_nowait(zio);
2681 }
2682 
2683 static void
2684 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2685 {
2686 	arc_buf_t **datap = &dr->dt.dl.dr_data;
2687 	dmu_buf_impl_t *db = dr->dr_dbuf;
2688 	dnode_t *dn;
2689 	objset_t *os;
2690 	uint64_t txg = tx->tx_txg;
2691 
2692 	ASSERT(dmu_tx_is_syncing(tx));
2693 
2694 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2695 
2696 	mutex_enter(&db->db_mtx);
2697 	/*
2698 	 * To be synced, we must be dirtied.  But we
2699 	 * might have been freed after being dirtied.
2700 	 */
2701 	if (db->db_state == DB_UNCACHED) {
2702 		/* This buffer has been freed since it was dirtied */
2703 		ASSERT(db->db.db_data == NULL);
2704 	} else if (db->db_state == DB_FILL) {
2705 		/* This buffer was freed and is now being re-filled */
2706 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2707 	} else {
2708 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2709 	}
2710 	DBUF_VERIFY(db);
2711 
2712 	DB_DNODE_ENTER(db);
2713 	dn = DB_DNODE(db);
2714 
2715 	if (db->db_blkid == DMU_SPILL_BLKID) {
2716 		mutex_enter(&dn->dn_mtx);
2717 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2718 		mutex_exit(&dn->dn_mtx);
2719 	}
2720 
2721 	/*
2722 	 * If this is a bonus buffer, simply copy the bonus data into the
2723 	 * dnode.  It will be written out when the dnode is synced (and it
2724 	 * will be synced, since it must have been dirty for dbuf_sync to
2725 	 * be called).
2726 	 */
2727 	if (db->db_blkid == DMU_BONUS_BLKID) {
2728 		dbuf_dirty_record_t **drp;
2729 
2730 		ASSERT(*datap != NULL);
2731 		ASSERT0(db->db_level);
2732 		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2733 		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2734 		DB_DNODE_EXIT(db);
2735 
2736 		if (*datap != db->db.db_data) {
2737 			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2738 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2739 		}
2740 		db->db_data_pending = NULL;
2741 		drp = &db->db_last_dirty;
2742 		while (*drp != dr)
2743 			drp = &(*drp)->dr_next;
2744 		ASSERT(dr->dr_next == NULL);
2745 		ASSERT(dr->dr_dbuf == db);
2746 		*drp = dr->dr_next;
2747 		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2748 		ASSERT(db->db_dirtycnt > 0);
2749 		db->db_dirtycnt -= 1;
2750 		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2751 		return;
2752 	}
2753 
2754 	os = dn->dn_objset;
2755 
2756 	/*
2757 	 * dbuf_check_blkptr() may drop and re-acquire the db_mtx lock,
2758 	 * allowing a dmu_sync operation to sneak in.  As a result, we
2759 	 * must not check dr_override_state until dbuf_check_blkptr()
2760 	 * has returned.
2761 	 */
2762 	dbuf_check_blkptr(dn, db);
2763 
2764 	/*
2765 	 * If this buffer is in the middle of an immediate write,
2766 	 * wait for the synchronous IO to complete.
2767 	 */
2768 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2769 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2770 		cv_wait(&db->db_changed, &db->db_mtx);
2771 		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2772 	}
2773 
2774 	if (db->db_state != DB_NOFILL &&
2775 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2776 	    refcount_count(&db->db_holds) > 1 &&
2777 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2778 	    *datap == db->db_buf) {
2779 		/*
2780 		 * If this buffer is currently "in use" (i.e., there
2781 		 * are active holds and db_data still references it),
2782 		 * then make a copy before we start the write so that
2783 		 * any modifications from the open txg will not leak
2784 		 * into this write.
2785 		 *
2786 		 * NOTE: this copy does not need to be made for
2787 		 * objects only modified in the syncing context (e.g.
2788 		 * DMU_OT_DNODE blocks).
2789 		 */
2790 		int blksz = arc_buf_size(*datap);
2791 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2792 		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2793 		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2794 	}
2795 	db->db_data_pending = dr;
2796 
2797 	mutex_exit(&db->db_mtx);
2798 
2799 	dbuf_write(dr, *datap, tx);
2800 
2801 	ASSERT(!list_link_active(&dr->dr_dirty_node));
2802 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2803 		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2804 		DB_DNODE_EXIT(db);
2805 	} else {
2806 		/*
2807 		 * Although zio_nowait() does not "wait for an IO", it does
2808 		 * initiate the IO. If this is an empty write it seems plausible
2809 		 * that the IO could actually be completed before the nowait
2810 		 * returns. We need to DB_DNODE_EXIT() first in case
2811 		 * zio_nowait() invalidates the dbuf.
2812 		 */
2813 		DB_DNODE_EXIT(db);
2814 		zio_nowait(dr->dr_zio);
2815 	}
2816 }
2817 
2818 void
2819 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
2820 {
2821 	dbuf_dirty_record_t *dr;
2822 
2823 	while ((dr = list_head(list)) != NULL) {
2824 		if (dr->dr_zio != NULL) {
2825 			/*
2826 			 * If we find an already initialized zio then we
2827 			 * are processing the meta-dnode, and we have finished.
2828 			 * The dbufs for all dnodes are put back on the list
2829 			 * during processing, so that we can zio_wait()
2830 			 * these IOs after initiating all child IOs.
2831 			 */
2832 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2833 			    DMU_META_DNODE_OBJECT);
2834 			break;
2835 		}
2836 		if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2837 		    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
2838 			VERIFY3U(dr->dr_dbuf->db_level, ==, level);
2839 		}
2840 		list_remove(list, dr);
2841 		if (dr->dr_dbuf->db_level > 0)
2842 			dbuf_sync_indirect(dr, tx);
2843 		else
2844 			dbuf_sync_leaf(dr, tx);
2845 	}
2846 }
2847 
2848 /* ARGSUSED */
2849 static void
2850 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2851 {
2852 	dmu_buf_impl_t *db = vdb;
2853 	dnode_t *dn;
2854 	blkptr_t *bp = zio->io_bp;
2855 	blkptr_t *bp_orig = &zio->io_bp_orig;
2856 	spa_t *spa = zio->io_spa;
2857 	int64_t delta;
2858 	uint64_t fill = 0;
2859 	int i;
2860 
2861 	ASSERT3P(db->db_blkptr, ==, bp);
2862 
2863 	DB_DNODE_ENTER(db);
2864 	dn = DB_DNODE(db);
2865 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2866 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2867 	zio->io_prev_space_delta = delta;
2868 
2869 	if (bp->blk_birth != 0) {
2870 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2871 		    BP_GET_TYPE(bp) == dn->dn_type) ||
2872 		    (db->db_blkid == DMU_SPILL_BLKID &&
2873 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2874 		    BP_IS_EMBEDDED(bp));
2875 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2876 	}
2877 
2878 	mutex_enter(&db->db_mtx);
2879 
2880 #ifdef ZFS_DEBUG
2881 	if (db->db_blkid == DMU_SPILL_BLKID) {
2882 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2883 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2884 		    db->db_blkptr == &dn->dn_phys->dn_spill);
2885 	}
2886 #endif
2887 
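	/*
	 * Compute the block pointer's fill count: the number of
	 * allocated dnodes for a dnode block, the total fill of all
	 * non-hole children for an indirect block, or 0/1 for an
	 * ordinary data block.  dn_maxblkid is also pushed forward
	 * here for newly written level-0 blocks.
	 */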
2888 	if (db->db_level == 0) {
2889 		mutex_enter(&dn->dn_mtx);
2890 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2891 		    db->db_blkid != DMU_SPILL_BLKID)
2892 			dn->dn_phys->dn_maxblkid = db->db_blkid;
2893 		mutex_exit(&dn->dn_mtx);
2894 
2895 		if (dn->dn_type == DMU_OT_DNODE) {
2896 			dnode_phys_t *dnp = db->db.db_data;
2897 			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2898 			    i--, dnp++) {
2899 				if (dnp->dn_type != DMU_OT_NONE)
2900 					fill++;
2901 			}
2902 		} else {
2903 			if (BP_IS_HOLE(bp)) {
2904 				fill = 0;
2905 			} else {
2906 				fill = 1;
2907 			}
2908 		}
2909 	} else {
2910 		blkptr_t *ibp = db->db.db_data;
2911 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2912 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2913 			if (BP_IS_HOLE(ibp))
2914 				continue;
2915 			fill += BP_GET_FILL(ibp);
2916 		}
2917 	}
2918 	DB_DNODE_EXIT(db);
2919 
2920 	if (!BP_IS_EMBEDDED(bp))
2921 		bp->blk_fill = fill;
2922 
2923 	mutex_exit(&db->db_mtx);
2924 }
2925 
2926 /*
2927  * The SPA will call this callback several times for each zio - once
2928  * for every physical child i/o (zio->io_phys_children times).  This
2929  * allows the DMU to monitor the progress of each logical i/o.  For example,
2930  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2931  * block.  There may be a long delay before all copies/fragments are completed,
2932  * so this callback allows us to retire dirty space gradually, as the physical
2933  * i/os complete.
2934  */
2935 /* ARGSUSED */
2936 static void
2937 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2938 {
2939 	dmu_buf_impl_t *db = arg;
2940 	objset_t *os = db->db_objset;
2941 	dsl_pool_t *dp = dmu_objset_pool(os);
2942 	dbuf_dirty_record_t *dr;
2943 	int delta = 0;
2944 
2945 	dr = db->db_data_pending;
2946 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2947 
2948 	/*
2949 	 * The callback will be called io_phys_children times.  Retire one
2950 	 * portion of our dirty space each time we are called.  Any rounding
2951 	 * error will be cleaned up by dsl_pool_sync()'s call to
2952 	 * dsl_pool_undirty_space().
2953 	 */
2954 	delta = dr->dr_accounted / zio->io_phys_children;
2955 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
2956 }
2957 
2958 /* ARGSUSED */
2959 static void
2960 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2961 {
2962 	dmu_buf_impl_t *db = vdb;
2963 	blkptr_t *bp_orig = &zio->io_bp_orig;
2964 	blkptr_t *bp = db->db_blkptr;
2965 	objset_t *os = db->db_objset;
2966 	dmu_tx_t *tx = os->os_synctx;
2967 	dbuf_dirty_record_t **drp, *dr;
2968 
2969 	ASSERT0(zio->io_error);
2970 	ASSERT(db->db_blkptr == bp);
2971 
2972 	/*
2973 	 * For nopwrites and rewrites we ensure that the bp matches our
2974 	 * original and bypass all the accounting.
2975 	 */
2976 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2977 		ASSERT(BP_EQUAL(bp, bp_orig));
2978 	} else {
2979 		dsl_dataset_t *ds = os->os_dsl_dataset;
2980 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2981 		dsl_dataset_block_born(ds, bp, tx);
2982 	}
2983 
2984 	mutex_enter(&db->db_mtx);
2985 
2986 	DBUF_VERIFY(db);
2987 
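	/* Unlink the dirty record for this write from db_last_dirty. */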
2988 	drp = &db->db_last_dirty;
2989 	while ((dr = *drp) != db->db_data_pending)
2990 		drp = &dr->dr_next;
2991 	ASSERT(!list_link_active(&dr->dr_dirty_node));
2992 	ASSERT(dr->dr_dbuf == db);
2993 	ASSERT(dr->dr_next == NULL);
2994 	*drp = dr->dr_next;
2995 
2996 #ifdef ZFS_DEBUG
2997 	if (db->db_blkid == DMU_SPILL_BLKID) {
2998 		dnode_t *dn;
2999 
3000 		DB_DNODE_ENTER(db);
3001 		dn = DB_DNODE(db);
3002 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
3003 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
3004 		    db->db_blkptr == &dn->dn_phys->dn_spill);
3005 		DB_DNODE_EXIT(db);
3006 	}
3007 #endif
3008 
3009 	if (db->db_level == 0) {
3010 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3011 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
3012 		if (db->db_state != DB_NOFILL) {
3013 			if (dr->dt.dl.dr_data != db->db_buf)
3014 				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
3015 				    db));
3016 			else if (!arc_released(db->db_buf))
3017 				arc_set_callback(db->db_buf, dbuf_do_evict, db);
3018 		}
3019 	} else {
3020 		dnode_t *dn;
3021 
3022 		DB_DNODE_ENTER(db);
3023 		dn = DB_DNODE(db);
3024 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3025 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3026 		if (!BP_IS_HOLE(db->db_blkptr)) {
3027 			int epbs =
3028 			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3029 			ASSERT3U(db->db_blkid, <=,
3030 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
3031 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
3032 			    db->db.db_size);
3033 			if (!arc_released(db->db_buf))
3034 				arc_set_callback(db->db_buf, dbuf_do_evict, db);
3035 		}
3036 		DB_DNODE_EXIT(db);
3037 		mutex_destroy(&dr->dt.di.dr_mtx);
3038 		list_destroy(&dr->dt.di.dr_children);
3039 	}
3040 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
3041 
3042 	cv_broadcast(&db->db_changed);
3043 	ASSERT(db->db_dirtycnt > 0);
3044 	db->db_dirtycnt -= 1;
3045 	db->db_data_pending = NULL;
3046 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
3047 }
3048 
3049 static void
3050 dbuf_write_nofill_ready(zio_t *zio)
3051 {
3052 	dbuf_write_ready(zio, NULL, zio->io_private);
3053 }
3054 
3055 static void
3056 dbuf_write_nofill_done(zio_t *zio)
3057 {
3058 	dbuf_write_done(zio, NULL, zio->io_private);
3059 }
3060 
3061 static void
3062 dbuf_write_override_ready(zio_t *zio)
3063 {
3064 	dbuf_dirty_record_t *dr = zio->io_private;
3065 	dmu_buf_impl_t *db = dr->dr_dbuf;
3066 
3067 	dbuf_write_ready(zio, NULL, db);
3068 }
3069 
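/*
 * Completion callback for an "override" write (e.g. dmu_sync()).  If
 * the block was ultimately written somewhere other than the overridden
 * BP, free the block at the overridden BP and release the arc buffer.
 */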
3070 static void
3071 dbuf_write_override_done(zio_t *zio)
3072 {
3073 	dbuf_dirty_record_t *dr = zio->io_private;
3074 	dmu_buf_impl_t *db = dr->dr_dbuf;
3075 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
3076 
3077 	mutex_enter(&db->db_mtx);
3078 	if (!BP_EQUAL(zio->io_bp, obp)) {
3079 		if (!BP_IS_HOLE(obp))
3080 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
3081 		arc_release(dr->dt.dl.dr_data, db);
3082 	}
3083 	mutex_exit(&db->db_mtx);
3084 
3085 	dbuf_write_done(zio, NULL, db);
3086 }
3087 
3088 /* Issue I/O to commit a dirty buffer to disk. */
3089 static void
3090 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
3091 {
3092 	dmu_buf_impl_t *db = dr->dr_dbuf;
3093 	dnode_t *dn;
3094 	objset_t *os;
3095 	dmu_buf_impl_t *parent = db->db_parent;
3096 	uint64_t txg = tx->tx_txg;
3097 	zbookmark_phys_t zb;
3098 	zio_prop_t zp;
3099 	zio_t *zio;
3100 	int wp_flag = 0;
3101 
3102 	DB_DNODE_ENTER(db);
3103 	dn = DB_DNODE(db);
3104 	os = dn->dn_objset;
3105 
3106 	if (db->db_state != DB_NOFILL) {
3107 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
3108 			/*
3109 			 * Private object buffers are released here rather
3110 			 * than in dbuf_dirty() since they are only modified
3111 			 * in the syncing context and we don't want the
3112 			 * overhead of making multiple copies of the data.
3113 			 */
3114 			if (BP_IS_HOLE(db->db_blkptr)) {
3115 				arc_buf_thaw(data);
3116 			} else {
3117 				dbuf_release_bp(db);
3118 			}
3119 		}
3120 	}
3121 
3122 	if (parent != dn->dn_dbuf) {
3123 		/* Our parent is an indirect block. */
3124 		/* We have a dirty parent that has been scheduled for write. */
3125 		ASSERT(parent && parent->db_data_pending);
3126 		/* Our parent's buffer is one level closer to the dnode. */
3127 		ASSERT(db->db_level == parent->db_level-1);
3128 		/*
3129 		 * We're about to modify our parent's db_data by modifying
3130 		 * our block pointer, so the parent must be released.
3131 		 */
3132 		ASSERT(arc_released(parent->db_buf));
3133 		zio = parent->db_data_pending->dr_zio;
3134 	} else {
3135 		/* Our parent is the dnode itself. */
3136 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
3137 		    db->db_blkid != DMU_SPILL_BLKID) ||
3138 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
3139 		if (db->db_blkid != DMU_SPILL_BLKID)
3140 			ASSERT3P(db->db_blkptr, ==,
3141 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
3142 		zio = dn->dn_zio;
3143 	}
3144 
3145 	ASSERT(db->db_level == 0 || data == db->db_buf);
3146 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
3147 	ASSERT(zio);
3148 
3149 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
3150 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
3151 	    db->db.db_object, db->db_level, db->db_blkid);
3152 
3153 	if (db->db_blkid == DMU_SPILL_BLKID)
3154 		wp_flag = WP_SPILL;
3155 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
3156 
3157 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
3158 	DB_DNODE_EXIT(db);
3159 
3160 	if (db->db_level == 0 &&
3161 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
3162 		/*
3163 		 * The BP for this block has been provided by open context
3164 		 * (by dmu_sync() or dmu_buf_write_embedded()).
3165 		 */
3166 		void *contents = (data != NULL) ? data->b_data : NULL;
3167 
3168 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
3169 		    db->db_blkptr, contents, db->db.db_size, &zp,
3170 		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
3171 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3172 		mutex_enter(&db->db_mtx);
3173 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
3174 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
3175 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
3176 		mutex_exit(&db->db_mtx);
3177 	} else if (db->db_state == DB_NOFILL) {
3178 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
3179 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
3180 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
3181 		    db->db_blkptr, NULL, db->db.db_size, &zp,
3182 		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
3183 		    ZIO_PRIORITY_ASYNC_WRITE,
3184 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
3185 	} else {
3186 		ASSERT(arc_released(data));
3187 		dr->dr_zio = arc_write(zio, os->os_spa, txg,
3188 		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
3189 		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
3190 		    dbuf_write_physdone, dbuf_write_done, db,
3191 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3192 	}
3193 }
3194