xref: /illumos-gate/usr/src/lib/libzfs_core/common/libzfs_core.c (revision dcbf3bd6a1f1360fc1afcee9e22c6dcff7844bf2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
24  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 /*
29  * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
30  * It has the following characteristics:
31  *
32  *  - Thread Safe.  libzfs_core is accessible concurrently from multiple
33  *  threads.  This is accomplished primarily by avoiding global data
34  *  (e.g. caching).  Since it's thread-safe, there is no reason for a
35  *  process to have multiple libzfs "instances".  Therefore, we store
36  *  our few pieces of data (e.g. the file descriptor) in global
37  *  variables.  The fd is reference-counted so that the libzfs_core
38  *  library can be "initialized" multiple times (e.g. by different
39  *  consumers within the same process).
40  *
41  *  - Committed Interface.  The libzfs_core interface will be committed,
42  *  therefore consumers can compile against it and be confident that
43  *  their code will continue to work on future releases of this code.
44  *  Currently, the interface is Evolving (not Committed), but we intend
45  *  to commit to it once it is more complete and we determine that it
46  *  meets the needs of all consumers.
47  *
48  *  - Programmatic Error Handling.  libzfs_core communicates errors with
49  *  defined error numbers, and doesn't print anything to stdout/stderr.
50  *
51  *  - Thin Layer.  libzfs_core is a thin layer, marshaling arguments
52  *  to/from the kernel ioctls.  There is generally a 1:1 correspondence
53  *  between libzfs_core functions and ioctls to /dev/zfs.
54  *
55  *  - Clear Atomicity.  Because libzfs_core functions are generally 1:1
56  *  with kernel ioctls, and kernel ioctls are generally atomic, each
57  *  libzfs_core function is atomic.  For example, creating multiple
58  *  snapshots with a single call to lzc_snapshot() is atomic -- it
59  *  can't fail with only some of the requested snapshots created, even
60  *  in the event of power loss or system crash.
61  *
62  *  - Continued libzfs Support.  Some higher-level operations (e.g.
63  *  support for "zfs send -R") are too complicated to fit the scope of
64  *  libzfs_core.  This functionality will continue to live in libzfs.
65  *  Where appropriate, libzfs will use the underlying atomic operations
66  *  of libzfs_core.  For example, libzfs may implement "zfs send -R |
67  *  zfs receive" by using individual "send one snapshot", rename,
68  *  destroy, and "receive one snapshot" operations in libzfs_core.
69  *  /sbin/zfs and /sbin/zpool will link with both libzfs and
70  *  libzfs_core.  Other consumers should aim to use only libzfs_core,
71  *  since that will be the supported, stable interface going forwards.
72  */
73 
74 #include <libzfs_core.h>
75 #include <ctype.h>
76 #include <unistd.h>
77 #include <stdlib.h>
78 #include <string.h>
79 #include <errno.h>
80 #include <fcntl.h>
81 #include <pthread.h>
82 #include <sys/nvpair.h>
83 #include <sys/param.h>
84 #include <sys/types.h>
85 #include <sys/stat.h>
86 #include <sys/zfs_ioctl.h>
87 
88 static int g_fd;
89 static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
90 static int g_refcount;
91 
92 int
93 libzfs_core_init(void)
94 {
95 	(void) pthread_mutex_lock(&g_lock);
96 	if (g_refcount == 0) {
97 		g_fd = open("/dev/zfs", O_RDWR);
98 		if (g_fd < 0) {
99 			(void) pthread_mutex_unlock(&g_lock);
100 			return (errno);
101 		}
102 	}
103 	g_refcount++;
104 	(void) pthread_mutex_unlock(&g_lock);
105 	return (0);
106 }
107 
108 void
109 libzfs_core_fini(void)
110 {
111 	(void) pthread_mutex_lock(&g_lock);
112 	ASSERT3S(g_refcount, >, 0);
113 	g_refcount--;
114 	if (g_refcount == 0)
115 		(void) close(g_fd);
116 	(void) pthread_mutex_unlock(&g_lock);
117 }
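
/*
 * Example of the intended consumer pattern (an illustrative sketch, not part
 * of the library; the dataset name and do_something() are hypothetical).
 * Each consumer brackets its lzc_*() calls with libzfs_core_init() and
 * libzfs_core_fini(); the calls may be nested, since the file descriptor
 * above is reference-counted.
 *
 *	int err = libzfs_core_init();
 *	if (err != 0)
 *		return (err);
 *	if (lzc_exists("rpool/export/home"))
 *		do_something();
 *	libzfs_core_fini();
 */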
118 
119 static int
120 lzc_ioctl(zfs_ioc_t ioc, const char *name,
121     nvlist_t *source, nvlist_t **resultp)
122 {
123 	zfs_cmd_t zc = { 0 };
124 	int error = 0;
125 	char *packed;
126 	size_t size;
127 
128 	ASSERT3S(g_refcount, >, 0);
129 
130 	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
131 
132 	packed = fnvlist_pack(source, &size);
133 	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
134 	zc.zc_nvlist_src_size = size;
135 
136 	if (resultp != NULL) {
137 		*resultp = NULL;
138 		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
139 		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
140 		    malloc(zc.zc_nvlist_dst_size);
141 		if (zc.zc_nvlist_dst == NULL) {
142 			error = ENOMEM;
143 			goto out;
144 		}
145 	}
146 
147 	while (ioctl(g_fd, ioc, &zc) != 0) {
148 		if (errno == ENOMEM && resultp != NULL) {
149 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
150 			zc.zc_nvlist_dst_size *= 2;
151 			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
152 			    malloc(zc.zc_nvlist_dst_size);
153 			if (zc.zc_nvlist_dst == NULL) {
154 				error = ENOMEM;
155 				goto out;
156 			}
157 		} else {
158 			error = errno;
159 			break;
160 		}
161 	}
162 	if (zc.zc_nvlist_dst_filled) {
163 		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
164 		    zc.zc_nvlist_dst_size);
165 	}
166 
167 out:
168 	fnvlist_pack_free(packed, size);
169 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
170 	return (error);
171 }
172 
173 int
174 lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
175 {
176 	int error;
177 	nvlist_t *args = fnvlist_alloc();
178 	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
179 	if (props != NULL)
180 		fnvlist_add_nvlist(args, "props", props);
181 	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
182 	nvlist_free(args);
183 	return (error);
184 }
185 
186 int
187 lzc_clone(const char *fsname, const char *origin,
188     nvlist_t *props)
189 {
190 	int error;
191 	nvlist_t *args = fnvlist_alloc();
192 	fnvlist_add_string(args, "origin", origin);
193 	if (props != NULL)
194 		fnvlist_add_nvlist(args, "props", props);
195 	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
196 	nvlist_free(args);
197 	return (error);
198 }
199 
200 /*
201  * Creates snapshots.
202  *
203  * The keys in the snaps nvlist are the snapshots to be created.
204  * They must all be in the same pool.
205  *
206  * The props nvlist is properties to set.  Currently only user properties
207  * are supported.  { user:prop_name -> string value }
208  *
209  * The returned results nvlist will have an entry for each snapshot that failed.
210  * The value will be the (int32) error code.
211  *
212  * The return value will be 0 if all snapshots were created, otherwise it will
213  * be the errno of an (unspecified) snapshot that failed.
214  */
215 int
216 lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
217 {
218 	nvpair_t *elem;
219 	nvlist_t *args;
220 	int error;
221 	char pool[ZFS_MAX_DATASET_NAME_LEN];
222 
223 	*errlist = NULL;
224 
225 	/* determine the pool name */
226 	elem = nvlist_next_nvpair(snaps, NULL);
227 	if (elem == NULL)
228 		return (0);
229 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
230 	pool[strcspn(pool, "/@")] = '\0';
231 
232 	args = fnvlist_alloc();
233 	fnvlist_add_nvlist(args, "snaps", snaps);
234 	if (props != NULL)
235 		fnvlist_add_nvlist(args, "props", props);
236 
237 	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
238 	nvlist_free(args);
239 
240 	return (error);
241 }
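
/*
 * Example (an illustrative caller sketch; dataset names are hypothetical):
 * atomically create two snapshots in the same pool, then report any
 * per-snapshot error codes returned in the errlist.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	nvpair_t *pair;
 *	int err;
 *
 *	fnvlist_add_boolean(snaps, "rpool/fs1@backup");
 *	fnvlist_add_boolean(snaps, "rpool/fs2@backup");
 *	err = lzc_snapshot(snaps, NULL, &errlist);
 *	if (err != 0 && errlist != NULL) {
 *		for (pair = nvlist_next_nvpair(errlist, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(errlist, pair)) {
 *			(void) fprintf(stderr, "%s: error %d\n",
 *			    nvpair_name(pair), fnvpair_value_int32(pair));
 *		}
 *	}
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */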
242 
243 /*
244  * Destroys snapshots.
245  *
246  * The keys in the snaps nvlist are the snapshots to be destroyed.
247  * They must all be in the same pool.
248  *
249  * Snapshots that do not exist will be silently ignored.
250  *
251  * If 'defer' is not set, and a snapshot has user holds or clones, the
252  * destroy operation will fail and none of the snapshots will be
253  * destroyed.
254  *
255  * If 'defer' is set, and a snapshot has user holds or clones, it will be
256  * marked for deferred destruction, and will be destroyed when the last hold
257  * or clone is removed/destroyed.
258  *
259  * The return value will be 0 if all snapshots were destroyed (or marked for
260  * later destruction if 'defer' is set) or didn't exist to begin with.
261  *
262  * Otherwise the return value will be the errno of an (unspecified) snapshot
263  * that failed, no snapshots will be destroyed, and the errlist will have an
264  * entry for each snapshot that failed.  The value in the errlist will be
265  * the (int32) error code.
266  */
267 int
268 lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
269 {
270 	nvpair_t *elem;
271 	nvlist_t *args;
272 	int error;
273 	char pool[ZFS_MAX_DATASET_NAME_LEN];
274 
275 	/* determine the pool name */
276 	elem = nvlist_next_nvpair(snaps, NULL);
277 	if (elem == NULL)
278 		return (0);
279 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
280 	pool[strcspn(pool, "/@")] = '\0';
281 
282 	args = fnvlist_alloc();
283 	fnvlist_add_nvlist(args, "snaps", snaps);
284 	if (defer)
285 		fnvlist_add_boolean(args, "defer");
286 
287 	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
288 	nvlist_free(args);
289 
290 	return (error);
291 }
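
/*
 * Example (an illustrative caller sketch; snapshot names are hypothetical):
 * destroy two snapshots, deferring destruction of any that still have user
 * holds or clones.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *
 *	fnvlist_add_boolean(snaps, "rpool/fs1@backup");
 *	fnvlist_add_boolean(snaps, "rpool/fs2@backup");
 *	(void) lzc_destroy_snaps(snaps, B_TRUE, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */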
292 
293 int
294 lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
295     uint64_t *usedp)
296 {
297 	nvlist_t *args;
298 	nvlist_t *result;
299 	int err;
300 	char fs[ZFS_MAX_DATASET_NAME_LEN];
301 	char *atp;
302 
303 	/* determine the fs name */
304 	(void) strlcpy(fs, firstsnap, sizeof (fs));
305 	atp = strchr(fs, '@');
306 	if (atp == NULL)
307 		return (EINVAL);
308 	*atp = '\0';
309 
310 	args = fnvlist_alloc();
311 	fnvlist_add_string(args, "firstsnap", firstsnap);
312 
313 	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
314 	nvlist_free(args);
315 	if (err == 0)
316 		*usedp = fnvlist_lookup_uint64(result, "used");
317 	fnvlist_free(result);
318 
319 	return (err);
320 }
321 
322 boolean_t
323 lzc_exists(const char *dataset)
324 {
325 	/*
326 	 * The objset_stats ioctl is still legacy, so we need to construct our
327 	 * own zfs_cmd_t rather than using lzc_ioctl().
328 	 */
329 	zfs_cmd_t zc = { 0 };
330 
331 	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
332 	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
333 }
334 
335 /*
336  * Create "user holds" on snapshots.  If there is a hold on a snapshot,
337  * the snapshot cannot be destroyed.  (However, it can be marked for deletion
338  * by lzc_destroy_snaps(defer=B_TRUE).)
339  *
340  * The keys in the nvlist are snapshot names.
341  * The snapshots must all be in the same pool.
342  * The value is the name of the hold (string type).
343  *
344  * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
345  * In this case, when the cleanup_fd is closed (including on process
346  * termination), the holds will be released.  If the system is shut down
347  * uncleanly, the holds will be released when the pool is next opened
348  * or imported.
349  *
350  * Holds for snapshots which don't exist will be skipped and have an entry
351  * added to errlist, but will not cause an overall failure.
352  *
353  * The return value will be 0 if all holds, for snapshots that existed,
354  * were successfully created.
355  *
356  * Otherwise the return value will be the errno of an (unspecified) hold that
357  * failed and no holds will be created.
358  *
359  * In all cases the errlist will have an entry for each hold that failed
360  * (name = snapshot), with its value being the error code (int32).
361  */
362 int
363 lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
364 {
365 	char pool[ZFS_MAX_DATASET_NAME_LEN];
366 	nvlist_t *args;
367 	nvpair_t *elem;
368 	int error;
369 
370 	/* determine the pool name */
371 	elem = nvlist_next_nvpair(holds, NULL);
372 	if (elem == NULL)
373 		return (0);
374 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
375 	pool[strcspn(pool, "/@")] = '\0';
376 
377 	args = fnvlist_alloc();
378 	fnvlist_add_nvlist(args, "holds", holds);
379 	if (cleanup_fd != -1)
380 		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
381 
382 	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
383 	nvlist_free(args);
384 	return (error);
385 }
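
/*
 * Example (an illustrative caller sketch; names are hypothetical): place a
 * hold named "backup-job" on a snapshot, tied to a cleanup file descriptor
 * opened as described above, so the hold is released automatically when the
 * descriptor is closed or the process exits.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int cleanup_fd = open("/dev/zfs", O_RDWR | O_EXCL);
 *	int err;
 *
 *	fnvlist_add_string(holds, "rpool/fs1@backup", "backup-job");
 *	err = lzc_hold(holds, cleanup_fd, &errlist);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 *
 * Closing cleanup_fd later releases any holds still registered against it.
 */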
386 
387 /*
388  * Release "user holds" on snapshots.  If the snapshot has been marked for
389  * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
390  * any clones, and all the user holds are removed, then the snapshot will be
391  * destroyed.
392  *
393  * The keys in the nvlist are snapshot names.
394  * The snapshots must all be in the same pool.
395  * The value is an nvlist whose keys are the holds to remove.
396  *
397  * Holds which failed to release because they didn't exist will have an entry
398  * added to errlist, but will not cause an overall failure.
399  *
400  * The return value will be 0 if the holds nvlist was empty, or if all
401  * holds that existed were successfully removed.
402  *
403  * Otherwise the return value will be the errno of an (unspecified) hold that
404  * failed to release and no holds will be released.
405  *
406  * In all cases the errlist will have an entry for each hold that failed to
407  * release.
408  */
409 int
410 lzc_release(nvlist_t *holds, nvlist_t **errlist)
411 {
412 	char pool[ZFS_MAX_DATASET_NAME_LEN];
413 	nvpair_t *elem;
414 
415 	/* determine the pool name */
416 	elem = nvlist_next_nvpair(holds, NULL);
417 	if (elem == NULL)
418 		return (0);
419 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
420 	pool[strcspn(pool, "/@")] = '\0';
421 
422 	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
423 }
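
/*
 * Example (an illustrative caller sketch; names are hypothetical): release
 * the "backup-job" hold placed above.  Note the nesting: the value for each
 * snapshot is itself an nvlist whose keys name the holds to release.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *snap_holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *
 *	fnvlist_add_boolean(snap_holds, "backup-job");
 *	fnvlist_add_nvlist(holds, "rpool/fs1@backup", snap_holds);
 *	(void) lzc_release(holds, &errlist);
 *	nvlist_free(snap_holds);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 */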
424 
425 /*
426  * Retrieve list of user holds on the specified snapshot.
427  *
428  * On success, *holdsp will be set to an nvlist which the caller must free.
429  * The keys are the names of the holds, and the value is the creation time
430  * of the hold (uint64) in seconds since the epoch.
431  */
432 int
433 lzc_get_holds(const char *snapname, nvlist_t **holdsp)
434 {
435 	int error;
436 	nvlist_t *innvl = fnvlist_alloc();
437 	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
438 	fnvlist_free(innvl);
439 	return (error);
440 }
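
/*
 * Example (an illustrative caller sketch; the snapshot name is hypothetical):
 * print each hold on a snapshot along with its creation time.
 *
 *	nvlist_t *holds = NULL;
 *	nvpair_t *pair;
 *
 *	if (lzc_get_holds("rpool/fs1@backup", &holds) == 0) {
 *		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(holds, pair)) {
 *			(void) printf("%s\t%llu\n", nvpair_name(pair),
 *			    (u_longlong_t)fnvpair_value_uint64(pair));
 *		}
 *		nvlist_free(holds);
 *	}
 */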
441 
442 /*
443  * Generate a zfs send stream for the specified snapshot and write it to
444  * the specified file descriptor.
445  *
446  * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
447  *
448  * If "from" is NULL, a full (non-incremental) stream will be sent.
449  * If "from" is non-NULL, it must be the full name of a snapshot or
450  * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
451  * "pool/fs#earlier_bmark").  If non-NULL, the specified snapshot or
452  * bookmark must represent an earlier point in the history of "snapname".
453  * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
454  * or it can be the origin of "snapname"'s filesystem, or an earlier
455  * snapshot in the origin, etc.
456  *
457  * "fd" is the file descriptor to write the send stream to.
458  *
459  * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
460  * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
461  * records with drr_blksz > 128K.
462  *
463  * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
464  * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
465  * which the receiving system must support (as indicated by support
466  * for the "embedded_data" feature).
467  */
468 int
469 lzc_send(const char *snapname, const char *from, int fd,
470     enum lzc_send_flags flags)
471 {
472 	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
473 }
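
/*
 * Example (an illustrative caller sketch; dataset names and the output path
 * are hypothetical): write an incremental stream from @monday to @tuesday to
 * a file, permitting embedded-data records.
 *
 *	int fd = open("/var/tmp/fs1.incr", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	int err;
 *
 *	err = lzc_send("rpool/fs1@tuesday", "rpool/fs1@monday", fd,
 *	    LZC_SEND_FLAG_EMBED_DATA);
 *	(void) close(fd);
 */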
474 
475 int
476 lzc_send_resume(const char *snapname, const char *from, int fd,
477     enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
478 {
479 	nvlist_t *args;
480 	int err;
481 
482 	args = fnvlist_alloc();
483 	fnvlist_add_int32(args, "fd", fd);
484 	if (from != NULL)
485 		fnvlist_add_string(args, "fromsnap", from);
486 	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
487 		fnvlist_add_boolean(args, "largeblockok");
488 	if (flags & LZC_SEND_FLAG_EMBED_DATA)
489 		fnvlist_add_boolean(args, "embedok");
490 	if (resumeobj != 0 || resumeoff != 0) {
491 		fnvlist_add_uint64(args, "resume_object", resumeobj);
492 		fnvlist_add_uint64(args, "resume_offset", resumeoff);
493 	}
494 	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
495 	nvlist_free(args);
496 	return (err);
497 }
498 
499 /*
500  * "from" can be NULL, a snapshot, or a bookmark.
501  *
502  * If from is NULL, a full (non-incremental) stream will be estimated.  This
503  * is calculated very efficiently.
504  *
505  * If from is a snapshot, lzc_send_space uses the deadlists attached to
506  * each snapshot to efficiently estimate the stream size.
507  *
508  * If from is a bookmark, the indirect blocks in the destination snapshot
509  * are traversed, looking for blocks with a birth time since the creation TXG of
510  * the snapshot this bookmark was created from.  This will result in
511  * significantly more I/O and be less efficient than a send space estimation on
512  * an equivalent snapshot.
513  */
514 int
515 lzc_send_space(const char *snapname, const char *from, uint64_t *spacep)
516 {
517 	nvlist_t *args;
518 	nvlist_t *result;
519 	int err;
520 
521 	args = fnvlist_alloc();
522 	if (from != NULL)
523 		fnvlist_add_string(args, "from", from);
524 	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
525 	nvlist_free(args);
526 	if (err == 0)
527 		*spacep = fnvlist_lookup_uint64(result, "space");
528 	nvlist_free(result);
529 	return (err);
530 }
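
/*
 * Example (an illustrative caller sketch; snapshot names are hypothetical):
 * estimate the size of the incremental stream above before generating it.
 *
 *	uint64_t space = 0;
 *
 *	if (lzc_send_space("rpool/fs1@tuesday", "rpool/fs1@monday",
 *	    &space) == 0)
 *		(void) printf("estimated stream size: %llu bytes\n",
 *		    (u_longlong_t)space);
 */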
531 
532 static int
533 recv_read(int fd, void *buf, int ilen)
534 {
535 	char *cp = buf;
536 	int rv;
537 	int len = ilen;
538 
539 	do {
540 		rv = read(fd, cp, len);
541 		cp += rv;
542 		len -= rv;
543 	} while (rv > 0);
544 
545 	if (rv < 0 || len != 0)
546 		return (EIO);
547 
548 	return (0);
549 }
550 
551 static int
552 recv_impl(const char *snapname, nvlist_t *props, const char *origin,
553     boolean_t force, boolean_t resumable, int fd,
554     const dmu_replay_record_t *begin_record)
555 {
556 	/*
557 	 * The receive ioctl is still legacy, so we need to construct our own
558 	 * zfs_cmd_t rather than using lzc_ioctl().
559 	 */
560 	zfs_cmd_t zc = { 0 };
561 	char *atp;
562 	char *packed = NULL;
563 	size_t size;
564 	int error;
565 
566 	ASSERT3S(g_refcount, >, 0);
567 
568 	/* zc_name is name of containing filesystem */
569 	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
570 	atp = strchr(zc.zc_name, '@');
571 	if (atp == NULL)
572 		return (EINVAL);
573 	*atp = '\0';
574 
575 	/* if the fs does not exist, try its parent. */
576 	if (!lzc_exists(zc.zc_name)) {
577 		char *slashp = strrchr(zc.zc_name, '/');
578 		if (slashp == NULL)
579 			return (ENOENT);
580 		*slashp = '\0';
581 
582 	}
583 
584 	/* zc_value is full name of the snapshot to create */
585 	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
586 
587 	if (props != NULL) {
588 		/* zc_nvlist_src is props to set */
589 		packed = fnvlist_pack(props, &size);
590 		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
591 		zc.zc_nvlist_src_size = size;
592 	}
593 
594 	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
595 	if (origin != NULL)
596 		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));
597 
598 	/* zc_begin_record is non-byteswapped BEGIN record */
599 	if (begin_record == NULL) {
600 		error = recv_read(fd, &zc.zc_begin_record,
601 		    sizeof (zc.zc_begin_record));
602 		if (error != 0)
603 			goto out;
604 	} else {
605 		zc.zc_begin_record = *begin_record;
606 	}
607 
608 	/* zc_cookie is fd to read from */
609 	zc.zc_cookie = fd;
610 
611 	/* zc_guid is force flag */
612 	zc.zc_guid = force;
613 
614 	zc.zc_resumable = resumable;
615 
616 	/* zc_cleanup_fd is unused */
617 	zc.zc_cleanup_fd = -1;
618 
619 	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
620 	if (error != 0)
621 		error = errno;
622 
623 out:
624 	if (packed != NULL)
625 		fnvlist_pack_free(packed, size);
626 	free((void*)(uintptr_t)zc.zc_nvlist_dst);
627 	return (error);
628 }
629 
630 /*
631  * The simplest receive case: receive from the specified fd, creating the
632  * specified snapshot.  Apply the specified properties as "received" properties
633  * (which can be overridden by locally-set properties).  If the stream is a
634  * clone, its origin snapshot must be specified by 'origin'.  The 'force'
635  * flag will cause the target filesystem to be rolled back or destroyed if
636  * necessary to receive.
637  *
638  * Return 0 on success or an errno on failure.
639  *
640  * Note: this interface does not work on dedup'd streams
641  * (those with DMU_BACKUP_FEATURE_DEDUP).
642  */
643 int
644 lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
645     boolean_t force, int fd)
646 {
647 	return (recv_impl(snapname, props, origin, force, B_FALSE, fd, NULL));
648 }
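
/*
 * Example (an illustrative caller sketch; names and the input path are
 * hypothetical): receive the stream written above into a new snapshot,
 * rolling back the target filesystem first if necessary.
 *
 *	int fd = open("/var/tmp/fs1.incr", O_RDONLY);
 *	int err;
 *
 *	err = lzc_receive("rpool/restored/fs1@tuesday", NULL, NULL,
 *	    B_TRUE, fd);
 *	(void) close(fd);
 */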
649 
650 /*
651  * Like lzc_receive, but if the receive fails due to premature stream
652  * termination, the intermediate state will be preserved on disk.  In this
653  * case, ECKSUM will be returned.  The receive may subsequently be resumed
654  * with a resuming send stream generated by lzc_send_resume().
655  */
656 int
657 lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
658     boolean_t force, int fd)
659 {
660 	return (recv_impl(snapname, props, origin, force, B_TRUE, fd, NULL));
661 }
662 
663 /*
664  * Like lzc_receive, but allows the caller to read the begin record and then to
665  * pass it in.  That could be useful if the caller wants to derive, for example,
666  * the snapname or the origin parameters based on the information contained in
667  * the begin record.
668  * The begin record must be in its original form as read from the stream,
669  * in other words, it should not be byteswapped.
670  *
671  * The 'resumable' parameter allows the caller to obtain the same behavior as
672  * lzc_receive_resumable.
673  */
674 int
675 lzc_receive_with_header(const char *snapname, nvlist_t *props,
676     const char *origin, boolean_t force, boolean_t resumable, int fd,
677     const dmu_replay_record_t *begin_record)
678 {
679 	if (begin_record == NULL)
680 		return (EINVAL);
681 	return (recv_impl(snapname, props, origin, force, resumable, fd,
682 	    begin_record));
683 }
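
/*
 * Example (an illustrative caller sketch with error handling omitted; the
 * target filesystem is hypothetical): read the begin record first, derive
 * the snapshot name from the "toname" carried in the stream, then pass both
 * to lzc_receive_with_header().
 *
 *	dmu_replay_record_t drr;
 *	char snapname[MAXNAMELEN];
 *	int err;
 *
 *	(void) read(fd, &drr, sizeof (drr));
 *	(void) snprintf(snapname, sizeof (snapname), "rpool/restored/fs1@%s",
 *	    strchr(drr.drr_u.drr_begin.drr_toname, '@') + 1);
 *	err = lzc_receive_with_header(snapname, NULL, NULL, B_FALSE,
 *	    B_FALSE, fd, &drr);
 */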
684 
685 /*
686  * Roll back this filesystem or volume to its most recent snapshot.
687  * If snapnamebuf is not NULL, it will be filled in with the name
688  * of the most recent snapshot.
689  *
690  * Return 0 on success or an errno on failure.
691  */
692 int
693 lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
694 {
695 	nvlist_t *args;
696 	nvlist_t *result;
697 	int err;
698 
699 	args = fnvlist_alloc();
700 	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
701 	nvlist_free(args);
702 	if (err == 0 && snapnamebuf != NULL) {
703 		const char *snapname = fnvlist_lookup_string(result, "target");
704 		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
705 	}
706 	return (err);
707 }
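
/*
 * Example (an illustrative caller sketch; the dataset name is hypothetical):
 * roll a filesystem back to its most recent snapshot and report which
 * snapshot that was.
 *
 *	char snapname[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	if (lzc_rollback("rpool/fs1", snapname, sizeof (snapname)) == 0)
 *		(void) printf("rolled back to %s\n", snapname);
 */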
708 
709 /*
710  * Creates bookmarks.
711  *
712  * The bookmarks nvlist maps from the bookmark name (e.g. "pool/fs#bmark") to
713  * the name of the snapshot (e.g. "pool/fs@snap").  All the bookmarks and
714  * snapshots must be in the same pool.
715  *
716  * The returned results nvlist will have an entry for each bookmark that failed.
717  * The value will be the (int32) error code.
718  *
719  * The return value will be 0 if all bookmarks were created, otherwise it will
720  * be the errno of an (undetermined) bookmark that failed.
721  */
722 int
723 lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
724 {
725 	nvpair_t *elem;
726 	int error;
727 	char pool[ZFS_MAX_DATASET_NAME_LEN];
728 
729 	/* determine the pool name */
730 	elem = nvlist_next_nvpair(bookmarks, NULL);
731 	if (elem == NULL)
732 		return (0);
733 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
734 	pool[strcspn(pool, "/#")] = '\0';
735 
736 	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);
737 
738 	return (error);
739 }
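
/*
 * Example (an illustrative caller sketch; names are hypothetical): bookmark
 * a snapshot so that it can later be destroyed while the bookmark continues
 * to serve as an incremental-send source.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *
 *	fnvlist_add_string(bmarks, "rpool/fs1#monday", "rpool/fs1@monday");
 *	(void) lzc_bookmark(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */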
740 
741 /*
742  * Retrieve bookmarks.
743  *
744  * Retrieve the list of bookmarks for the given file system. The props
745  * parameter is an nvlist of property names (with no values) that will be
746  * returned for each bookmark.
747  *
748  * The following are valid properties on bookmarks, all of which are numbers
749  * (represented as uint64 in the nvlist)
750  *
751  * "guid" - globally unique identifier of the snapshot it refers to
752  * "createtxg" - txg when the snapshot it refers to was created
753  * "creation" - timestamp when the snapshot it refers to was created
754  *
755  * The format of the returned nvlist is as follows:
756  * <short name of bookmark> -> {
757  *     <name of property> -> {
758  *         "value" -> uint64
759  *     }
760  *  }
761  */
762 int
763 lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
764 {
765 	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
766 }
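
/*
 * Example (an illustrative caller sketch; the dataset name is hypothetical):
 * print each bookmark on a filesystem with the txg in which its source
 * snapshot was created.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *bmarks = NULL;
 *	nvpair_t *pair;
 *
 *	fnvlist_add_boolean(props, "createtxg");
 *	if (lzc_get_bookmarks("rpool/fs1", props, &bmarks) == 0) {
 *		for (pair = nvlist_next_nvpair(bmarks, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(bmarks, pair)) {
 *			nvlist_t *bprops = fnvpair_value_nvlist(pair);
 *			nvlist_t *prop = fnvlist_lookup_nvlist(bprops,
 *			    "createtxg");
 *			(void) printf("%s\t%llu\n", nvpair_name(pair),
 *			    (u_longlong_t)fnvlist_lookup_uint64(prop,
 *			    "value"));
 *		}
 *		nvlist_free(bmarks);
 *	}
 *	nvlist_free(props);
 */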
767 
768 /*
769  * Destroys bookmarks.
770  *
771  * The keys in the bmarks nvlist are the bookmarks to be destroyed.
772  * They must all be in the same pool.  Bookmarks are specified as
773  * <fs>#<bmark>.
774  *
775  * Bookmarks that do not exist will be silently ignored.
776  *
777  * The return value will be 0 if all bookmarks that existed were destroyed.
778  *
779  * Otherwise the return value will be the errno of an (undetermined) bookmark
780  * that failed, no bookmarks will be destroyed, and the errlist will have an
781  * entry for each bookmark that failed.  The value in the errlist will be
782  * the (int32) error code.
783  */
784 int
785 lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
786 {
787 	nvpair_t *elem;
788 	int error;
789 	char pool[ZFS_MAX_DATASET_NAME_LEN];
790 
791 	/* determine the pool name */
792 	elem = nvlist_next_nvpair(bmarks, NULL);
793 	if (elem == NULL)
794 		return (0);
795 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
796 	pool[strcspn(pool, "/#")] = '\0';
797 
798 	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);
799 
800 	return (error);
801 }
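
/*
 * Example (an illustrative caller sketch; the bookmark name is hypothetical):
 * destroy the bookmark created above.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *
 *	fnvlist_add_boolean(bmarks, "rpool/fs1#monday");
 *	(void) lzc_destroy_bookmarks(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */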
802