/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/vdev.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/zfeature.h>
#include <zfeature_common.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
} spa_error_entry_t;
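
/*
 * Illustrative sketch only, not the canonical implementation: error entries
 * are kept in AVL trees (see spa_errlist_last/spa_errlist_scrub below), so
 * they need a comparator ordering entries by se_bookmark.  A minimal
 * comparator could look like the following; the name err_entry_compare is a
 * hypothetical placeholder:
 *
 *	static int
 *	err_entry_compare(const void *a, const void *b)
 *	{
 *		const spa_error_entry_t *ea = a;
 *		const spa_error_entry_t *eb = b;
 *		int cmp = memcmp(&ea->se_bookmark, &eb->se_bookmark,
 *		    sizeof (zbookmark_phys_t));
 *
 *		if (cmp < 0)
 *			return (-1);
 *		if (cmp > 0)
 *			return (1);
 *		return (0);
 *	}
 */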

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;
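
/*
 * Hedged sketch of the intent behind these fields, not a definitive statement
 * of the on-disk logic: the history log behaves as a ring buffer in which the
 * initial "zpool create" records (up to sh_pool_create_len) are never
 * overwritten, while logical offsets past that point wrap within the
 * remaining physical space.  Mapping a logical offset to a physical offset
 * might look roughly like this (history_log_to_phys is a hypothetical helper
 * name used only for illustration):
 *
 *	static uint64_t
 *	history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
 *	{
 *		uint64_t phys_len =
 *		    shpp->sh_phys_max_off - shpp->sh_pool_create_len;
 *
 *		return ((log_off - shpp->sh_pool_create_len) % phys_len +
 *		    shpp->sh_pool_create_len);
 *	}
 */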

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	refcount_t	scl_count;
} spa_config_lock_t;
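
/*
 * Per-lock state for the SCL_* config locks (spa_config_lock[] in struct spa
 * below).  As a hedged usage sketch, a read-side critical section is bracketed
 * by spa_config_enter()/spa_config_exit(); which SCL_* bits are required
 * depends on what the caller touches, so SCL_CONFIG here is only an example:
 *
 *	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
 *	... examine spa->spa_root_vdev or other config state ...
 *	spa_config_exit(spa, SCL_CONFIG, FTAG);
 */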

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;

/*
 * State machine for the zpool-poolname process.  The state transitions
 * are done as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
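
/*
 * Hedged sketch of how a transition in the state machine above is driven:
 * transitions happen under spa_proc_lock and are announced on spa_proc_cv
 * (both declared in struct spa below).  Requesting ACTIVE -> DEACTIVATE and
 * waiting for the process to acknowledge might look roughly like this
 * (illustrative only, not the exact code in spa_deactivate()):
 *
 *	mutex_enter(&spa->spa_proc_lock);
 *	if (spa->spa_proc_state == SPA_PROC_ACTIVE) {
 *		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
 *		cv_broadcast(&spa->spa_proc_cv);
 *		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE)
 *			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
 *	}
 *	mutex_exit(&spa->spa_proc_lock);
 */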

typedef struct spa_taskqs {
	uint_t stqs_count;
	taskq_t **stqs_taskq;
} spa_taskqs_t;

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN];	/* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	timespec_t	spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	int		spa_min_ashift;		/* of vdevs in normal class */
	int		spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	kmutex_t	spa_alloc_lock;
	avl_tree_t	spa_alloc_tree;
	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	uint64_t	spa_last_io;		/* lbolt of last non-scan I/O */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub I/Os */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	uint8_t		spa_suspended;		/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_debug;		/* debug enabled? */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	int		spa_mode;		/* FREAD | FWRITE */
	spa_log_state_t spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	uint64_t	spa_bootsize;		/* efi system partition size */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_ditto;	/* dedup ditto threshold */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
	struct proc	*spa_proc;		/* "zpool-poolname" process */
	uint64_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	cyclic_id_t	spa_deadman_cycid;	/* cyclic id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman expiration timer */
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t	spa_avz_action;	/* destroy/rebuild AVZ? */

	/*
	 * spa_iokstat_lock protects spa_iokstat and
	 * spa_queue_stats[].
	 */
	kmutex_t	spa_iokstat_lock;
	struct kstat	*spa_iokstat;		/* kstat of io to this pool */
	struct {
		int spa_active;
		int spa_queued;
	} spa_queue_stats[ZIO_PRIORITY_NUM_QUEUEABLE];

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	refcount_t	spa_refcount;		/* number of opens */
};
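
/*
 * Hedged sketch of how the per-priority queue counters above might be
 * updated when an I/O is queued, per the spa_iokstat_lock comment in the
 * struct; the surrounding function and exact kstat calls are illustrative
 * only, not a reference to the real call sites:
 *
 *	mutex_enter(&spa->spa_iokstat_lock);
 *	spa->spa_queue_stats[priority].spa_queued++;
 *	if (spa->spa_iokstat != NULL)
 *		kstat_waitq_enter(spa->spa_iokstat->ks_data);
 *	mutex_exit(&spa->spa_iokstat_lock);
 */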

extern const char *spa_config_path;

extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
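
/*
 * Hedged usage sketch: callers dispatch work onto one of the pool's zio
 * taskqs selected by zio type and taskq type, reusing a preallocated
 * taskq_ent_t.  The zio_execute/io_tqent names and the TQ_SLEEP flag below
 * reflect typical zio pipeline usage but are shown here only as an
 * illustration:
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_WRITE, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, TQ_SLEEP, &zio->io_tqent);
 */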

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */