xref: /linux/drivers/md/bcache/bcache.h (revision e2be04c7f9958dde770eeb8b30e829ca969b37bb)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHE_H
3 #define _BCACHE_H
4 
5 /*
6  * SOME HIGH LEVEL CODE DOCUMENTATION:
7  *
8  * Bcache mostly works with cache sets, cache devices, and backing devices.
9  *
10  * Support for multiple cache devices hasn't quite been finished off yet, but
11  * it's about 95% plumbed through. A cache set and its cache devices are sort of
12  * like an md raid array and its component devices. Most of the code doesn't care
13  * about individual cache devices; the main abstraction is the cache set.
14  *
15  * Multiple cache devices are intended to give us the ability to mirror dirty
16  * cached data and metadata, without mirroring clean cached data.
17  *
18  * Backing devices are different, in that they have a lifetime independent of a
19  * cache set. When you register a newly formatted backing device it'll come up
20  * in passthrough mode, and then you can attach and detach a backing device from
21  * a cache set at runtime - while it's mounted and in use. Detaching implicitly
22  * invalidates any cached data for that backing device.
23  *
24  * A cache set can have multiple (many) backing devices attached to it.
25  *
26  * There are also flash only volumes - this is the reason for the distinction
27  * between struct cached_dev and struct bcache_device. A flash only volume
28  * works much like a bcache device that has a backing device, except the
29  * "cached" data is always dirty. The end result is that we get thin
30  * provisioning with very little additional code.
31  *
32  * Flash only volumes work but they're not production ready because the moving
33  * garbage collector needs more work. More on that later.
34  *
35  * BUCKETS/ALLOCATION:
36  *
37  * Bcache is primarily designed for caching, which means that in normal
38  * operation all of our available space will be allocated. Thus, we need an
39  * efficient way of deleting things from the cache so we can write new things to
40  * it.
41  *
42  * To do this, we first divide the cache device up into buckets. A bucket is the
43  * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
44  * works efficiently.
45  *
46  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
47  * it. The gens and priorities for all the buckets are stored contiguously and
48  * packed on disk (in a linked list of buckets - aside from the superblock, all
49  * of bcache's metadata is stored in buckets).
50  *
51  * The priority is used to implement an LRU. We reset a bucket's priority when
52  * we allocate it or on a cache hit, and every so often we decrement the priority
53  * of each bucket. It could be used to implement something more sophisticated,
54  * if anyone ever gets around to it.
55  *
56  * The generation is used for invalidating buckets. Each pointer also has an 8
57  * bit generation embedded in it; for a pointer to be considered valid, its gen
58  * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
59  * we have to do is increment its gen (and write its new gen to disk; we batch
60  * this up).
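 *
 * As a sketch (the real helpers, gen_after() and ptr_stale(), are defined
 * further down in this header), the validity test is just 8 bit modular
 * arithmetic:
 *
 *	stale = bucket->gen - PTR_GEN(k, ptr);	// 0 means the pointer is valid;
 *						// nonzero means the bucket has
 *						// been reused and the key ignored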
61  *
62  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
63  * contain metadata (including btree nodes).
64  *
65  * THE BTREE:
66  *
67  * Bcache is in large part designed around the btree.
68  *
69  * At a high level, the btree is just an index of key -> ptr tuples.
70  *
71  * Keys represent extents, and thus have a size field. Keys also have a variable
72  * number of pointers attached to them (potentially zero, which is handy for
73  * invalidating the cache).
74  *
75  * The key itself is an inode:offset pair. The inode number corresponds to a
76  * backing device or a flash only volume. The offset is the ending offset of the
77  * extent within the inode - not the starting offset; this makes lookups
78  * slightly more convenient.
79  *
80  * Pointers contain the cache device id, the offset on that device, and an 8 bit
81  * generation number. More on the gen later.
82  *
83  * Index lookups are not fully abstracted - cache lookups in particular are
84  * still somewhat mixed in with the btree code, but things are headed in that
85  * direction.
86  *
87  * Updates are fairly well abstracted, though. There are two different ways of
88  * updating the btree; insert and replace.
89  *
90  * BTREE_INSERT will just take a list of keys and insert them into the btree -
91  * overwriting (possibly only partially) any extents they overlap with. This is
92  * used to update the index after a write.
93  *
94  * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
95  * overwriting a key that matches another given key. This is used for inserting
96  * data into the cache after a cache miss, and for background writeback, and for
97  * the moving garbage collector.
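 *
 * A sketch of the difference (illustrative pseudo-code, not the real function
 * signatures):
 *
 *	insert(keys)		// unconditionally index keys, clobbering
 *				// whatever extents they overlap
 *	replace(old, new)	// index new only if old is still present -
 *				// cmpxchg() semantics, so it fails if a racing
 *				// write already overwrote that extent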
98  *
99  * There is no "delete" operation; deleting things from the index is
100  * accomplished either by invalidating pointers (by incrementing a bucket's
101  * gen) or by inserting a key with 0 pointers - which will overwrite anything
102  * previously present at that location in the index.
103  *
104  * This means that there are always stale/invalid keys in the btree. They're
105  * filtered out by the code that iterates through a btree node, and removed when
106  * a btree node is rewritten.
107  *
108  * BTREE NODES:
109  *
110  * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111  * free smaller than a bucket - so, that's how big our btree nodes are.
112  *
113  * (If buckets are really big we'll only use part of the bucket for a btree node
114  * - no less than 1/4th - but a bucket still contains no more than a single
115  * btree node. I'd actually like to change this, but for now we rely on the
116  * bucket's gen for deleting btree nodes when we rewrite/split a node.)
117  *
118  * Anyways, btree nodes are big - big enough to be inefficient with a textbook
119  * btree implementation.
120  *
121  * The way this is solved is that btree nodes are internally log structured; we
122  * can append new keys to an existing btree node without rewriting it. This
123  * means each set of keys we write is sorted, but the node is not.
124  *
125  * We maintain this log structure in memory - keeping 1Mb of keys sorted would
126  * be expensive, and we have to distinguish between the keys we have written and
127  * the keys we haven't. So to do a lookup in a btree node, we have to search
128  * each sorted set. But we do merge written sets together lazily, so the cost of
129  * these extra searches is quite low (normally most of the keys in a btree node
130  * will be in one big set, and then there'll be one or two sets that are much
131  * smaller).
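 *
 * So a lookup within a node is, roughly:
 *
 *	for each sorted set in the node (usually one big one plus a few small)
 *		binary search that set
 *	with the per-set results merged by the iterator code.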
132  *
133  * This log structure makes bcache's btree more of a hybrid between a
134  * conventional btree and a compacting data structure, with some of the
135  * advantages of both.
136  *
137  * GARBAGE COLLECTION:
138  *
139  * We can't just invalidate any bucket - it might contain dirty data or
140  * metadata. If it once contained dirty data, other writes might overwrite it
141  * later, leaving no valid pointers into that bucket in the index.
142  *
143  * Thus, the primary purpose of garbage collection is to find buckets to reuse.
144  * It also counts how much valid data each bucket currently contains, so that
145  * allocation can reuse buckets sooner when they've been mostly overwritten.
146  *
147  * It also does some things that are really internal to the btree
148  * implementation. If a btree node contains pointers that are stale by more than
149  * some threshold, it rewrites the btree node to avoid the bucket's generation
150  * wrapping around. It also merges adjacent btree nodes if they're empty enough.
151  *
152  * THE JOURNAL:
153  *
154  * Bcache's journal is not necessary for consistency; we always strictly
155  * order metadata writes so that the btree and everything else is consistent on
156  * disk in the event of an unclean shutdown, and in fact bcache had writeback
157  * caching (with recovery from unclean shutdown) before journalling was
158  * implemented.
159  *
160  * Rather, the journal is purely a performance optimization; we can't complete a
161  * write until we've updated the index on disk, otherwise the cache would be
162  * inconsistent in the event of an unclean shutdown. This means that without the
163  * journal, on random write workloads we constantly have to update all the leaf
164  * nodes in the btree, and those writes will be mostly empty (appending at most
165  * a few keys each) - highly inefficient in terms of the amount of metadata written,
166  * and it puts more strain on the various btree resorting/compacting code.
167  *
168  * The journal is just a log of keys we've inserted; on startup we just reinsert
169  * all the keys in the open journal entries. That means that when we're updating
170  * a node in the btree, we can wait until a 4k block of keys fills up before
171  * writing them out.
172  *
173  * For simplicity, we only journal updates to leaf nodes; updates to parent
174  * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
175  * the complexity to deal with journalling them (in particular, journal replay)
176  * - updates to non leaf nodes just happen synchronously (see btree_split()).
177  */
178 
179 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
180 
181 #include <linux/bcache.h>
182 #include <linux/bio.h>
183 #include <linux/kobject.h>
184 #include <linux/list.h>
185 #include <linux/mutex.h>
186 #include <linux/rbtree.h>
187 #include <linux/rwsem.h>
188 #include <linux/types.h>
189 #include <linux/workqueue.h>
190 
191 #include "bset.h"
192 #include "util.h"
193 #include "closure.h"
194 
195 struct bucket {
196 	atomic_t	pin;
197 	uint16_t	prio;
198 	uint8_t		gen;
199 	uint8_t		last_gc; /* Most out of date gen in the btree */
200 	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
201 };
202 
203 /*
204  * I'd use bitfields for these, but I don't trust the compiler not to screw me
205  * as multiple threads touch struct bucket without locking
206  */
207 
208 BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
209 #define GC_MARK_RECLAIMABLE	1
210 #define GC_MARK_DIRTY		2
211 #define GC_MARK_METADATA	3
212 #define GC_SECTORS_USED_SIZE	13
213 #define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
214 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
215 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
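
/*
 * Resulting layout of the 16 bit gc_mark word (a reading aid for the
 * BITMASK()s above, not new functionality):
 *
 *	bits  0-1	GC_MARK		(reclaimable / dirty / metadata)
 *	bits  2-14	GC_SECTORS_USED	(13 bits, capped at MAX_GC_SECTORS_USED)
 *	bit   15	GC_MOVE
 */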
216 
217 #include "journal.h"
218 #include "stats.h"
219 struct search;
220 struct btree;
221 struct keybuf;
222 
223 struct keybuf_key {
224 	struct rb_node		node;
225 	BKEY_PADDED(key);
226 	void			*private;
227 };
228 
229 struct keybuf {
230 	struct bkey		last_scanned;
231 	spinlock_t		lock;
232 
233 	/*
234 	 * Beginning and end of range in rb tree - so that we can skip taking
235 	 * the lock and checking the rb tree when we need to check for overlapping
236 	 * keys.
237 	 */
238 	struct bkey		start;
239 	struct bkey		end;
240 
241 	struct rb_root		keys;
242 
243 #define KEYBUF_NR		500
244 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
245 };
246 
247 struct bcache_device {
248 	struct closure		cl;
249 
250 	struct kobject		kobj;
251 
252 	struct cache_set	*c;
253 	unsigned		id;
254 #define BCACHEDEVNAME_SIZE	12
255 	char			name[BCACHEDEVNAME_SIZE];
256 
257 	struct gendisk		*disk;
258 
259 	unsigned long		flags;
260 #define BCACHE_DEV_CLOSING	0
261 #define BCACHE_DEV_DETACHING	1
262 #define BCACHE_DEV_UNLINK_DONE	2
263 
264 	unsigned		nr_stripes;
265 	unsigned		stripe_size;
266 	atomic_t		*stripe_sectors_dirty;
267 	unsigned long		*full_dirty_stripes;
268 
269 	unsigned long		sectors_dirty_last;
270 	long			sectors_dirty_derivative;
271 
272 	struct bio_set		*bio_split;
273 
274 	unsigned		data_csum:1;
275 
276 	int (*cache_miss)(struct btree *, struct search *,
277 			  struct bio *, unsigned);
278 	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
279 };
280 
281 struct io {
282 	/* Used to track sequential IO so it can be skipped */
283 	struct hlist_node	hash;
284 	struct list_head	lru;
285 
286 	unsigned long		jiffies;
287 	unsigned		sequential;
288 	sector_t		last;
289 };
290 
291 struct cached_dev {
292 	struct list_head	list;
293 	struct bcache_device	disk;
294 	struct block_device	*bdev;
295 
296 	struct cache_sb		sb;
297 	struct bio		sb_bio;
298 	struct bio_vec		sb_bv[1];
299 	struct closure		sb_write;
300 	struct semaphore	sb_write_mutex;
301 
302 	/* Refcount on the cache set. Always nonzero when we're caching. */
303 	atomic_t		count;
304 	struct work_struct	detach;
305 
306 	/*
307 	 * Device might not be running if it's dirty and the cache set hasn't
308 	 * shown up yet.
309 	 */
310 	atomic_t		running;
311 
312 	/*
313 	 * Writes take a shared lock from start to finish; scanning for dirty
314 	 * data to refill the rb tree requires an exclusive lock.
315 	 */
316 	struct rw_semaphore	writeback_lock;
317 
318 	/*
319 	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
320 	 * data in the cache. Protected by writeback_lock; must have a
321 	 * shared lock to set and an exclusive lock to clear.
322 	 */
323 	atomic_t		has_dirty;
324 
325 	struct bch_ratelimit	writeback_rate;
326 	struct delayed_work	writeback_rate_update;
327 
328 	/*
329 	 * Internal to the writeback code, so read_dirty() can keep track of
330 	 * where it's at.
331 	 */
332 	sector_t		last_read;
333 
334 	/* Limit number of writeback bios in flight */
335 	struct semaphore	in_flight;
336 	struct task_struct	*writeback_thread;
337 	struct workqueue_struct	*writeback_write_wq;
338 
339 	struct keybuf		writeback_keys;
340 
341 	/* For tracking sequential IO */
342 #define RECENT_IO_BITS	7
343 #define RECENT_IO	(1 << RECENT_IO_BITS)
344 	struct io		io[RECENT_IO];
345 	struct hlist_head	io_hash[RECENT_IO + 1];
346 	struct list_head	io_lru;
347 	spinlock_t		io_lock;
348 
349 	struct cache_accounting	accounting;
350 
351 	/* The rest of this all shows up in sysfs */
352 	unsigned		sequential_cutoff;
353 	unsigned		readahead;
354 
355 	unsigned		verify:1;
356 	unsigned		bypass_torture_test:1;
357 
358 	unsigned		partial_stripes_expensive:1;
359 	unsigned		writeback_metadata:1;
360 	unsigned		writeback_running:1;
361 	unsigned char		writeback_percent;
362 	unsigned		writeback_delay;
363 
364 	uint64_t		writeback_rate_target;
365 	int64_t			writeback_rate_proportional;
366 	int64_t			writeback_rate_derivative;
367 	int64_t			writeback_rate_change;
368 
369 	unsigned		writeback_rate_update_seconds;
370 	unsigned		writeback_rate_d_term;
371 	unsigned		writeback_rate_p_term_inverse;
372 };
373 
374 enum alloc_reserve {
375 	RESERVE_BTREE,
376 	RESERVE_PRIO,
377 	RESERVE_MOVINGGC,
378 	RESERVE_NONE,
379 	RESERVE_NR,
380 };
381 
382 struct cache {
383 	struct cache_set	*set;
384 	struct cache_sb		sb;
385 	struct bio		sb_bio;
386 	struct bio_vec		sb_bv[1];
387 
388 	struct kobject		kobj;
389 	struct block_device	*bdev;
390 
391 	struct task_struct	*alloc_thread;
392 
393 	struct closure		prio;
394 	struct prio_set		*disk_buckets;
395 
396 	/*
397 	 * When allocating new buckets, prio_write() gets first dibs - since we
398 	 * may not be able to allocate at all without writing priorities and gens.
399 	 * prio_last_buckets[] contains the last buckets we wrote priorities to
400 	 * (so gc can mark them as metadata), prio_buckets[] contains the buckets
401 	 * allocated for the next prio write.
402 	 */
403 	uint64_t		*prio_buckets;
404 	uint64_t		*prio_last_buckets;
405 
406 	/*
407 	 * free: Buckets that are ready to be used
408 	 *
409 	 * free_inc: Incoming buckets - these are buckets that currently have
410 	 * cached data in them, and we can't reuse them until after we write
411 	 * their new gen to disk. After prio_write() finishes writing the new
412 	 * gens/prios, they'll be moved to the free list (and possibly discarded
413 	 * in the process)
414 	 */
415 	DECLARE_FIFO(long, free)[RESERVE_NR];
416 	DECLARE_FIFO(long, free_inc);
417 
418 	size_t			fifo_last_bucket;
419 
420 	/* Allocation stuff: */
421 	struct bucket		*buckets;
422 
423 	DECLARE_HEAP(struct bucket *, heap);
424 
425 	/*
426 	 * If nonzero, we know we aren't going to find any buckets to invalidate
427 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
428 	 * cpu
429 	 */
430 	unsigned		invalidate_needs_gc;
431 
432 	bool			discard; /* Get rid of? */
433 
434 	struct journal_device	journal;
435 
436 	/* The rest of this all shows up in sysfs */
437 #define IO_ERROR_SHIFT		20
438 	atomic_t		io_errors;
439 	atomic_t		io_count;
440 
441 	atomic_long_t		meta_sectors_written;
442 	atomic_long_t		btree_sectors_written;
443 	atomic_long_t		sectors_written;
444 };
445 
446 struct gc_stat {
447 	size_t			nodes;
448 	size_t			key_bytes;
449 
450 	size_t			nkeys;
451 	uint64_t		data;	/* sectors */
452 	unsigned		in_use; /* percent */
453 };
454 
455 /*
456  * Flag bits, for how the cache set is shutting down, and what phase it's at:
457  *
458  * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
459  * all the backing devices first (their cached data gets invalidated, and they
460  * won't automatically reattach).
461  *
462  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
463  * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
464  * flushing dirty data).
465  *
466  * CACHE_SET_RUNNING means all cache devices have been registered and journal
467  * replay is complete.
468  */
469 #define CACHE_SET_UNREGISTERING		0
470 #define	CACHE_SET_STOPPING		1
471 #define	CACHE_SET_RUNNING		2
472 
473 struct cache_set {
474 	struct closure		cl;
475 
476 	struct list_head	list;
477 	struct kobject		kobj;
478 	struct kobject		internal;
479 	struct dentry		*debug;
480 	struct cache_accounting accounting;
481 
482 	unsigned long		flags;
483 
484 	struct cache_sb		sb;
485 
486 	struct cache		*cache[MAX_CACHES_PER_SET];
487 	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
488 	int			caches_loaded;
489 
490 	struct bcache_device	**devices;
491 	struct list_head	cached_devs;
492 	uint64_t		cached_dev_sectors;
493 	struct closure		caching;
494 
495 	struct closure		sb_write;
496 	struct semaphore	sb_write_mutex;
497 
498 	mempool_t		*search;
499 	mempool_t		*bio_meta;
500 	struct bio_set		*bio_split;
501 
502 	/* For the btree cache */
503 	struct shrinker		shrink;
504 
505 	/* For the btree cache and anything allocation related */
506 	struct mutex		bucket_lock;
507 
508 	/* log2(bucket_size), in sectors */
509 	unsigned short		bucket_bits;
510 
511 	/* log2(block_size), in sectors */
512 	unsigned short		block_bits;
513 
514 	/*
515 	 * Default number of pages for a new btree node - may be less than a
516 	 * full bucket
517 	 */
518 	unsigned		btree_pages;
519 
520 	/*
521 	 * Lists of struct btrees; lru is the list for structs that have memory
522 	 * allocated for an actual btree node; freed is for structs that do not.
523 	 *
524 	 * We never free a struct btree, except on shutdown - we just put it on
525 	 * the btree_cache_freed list and reuse it later. This simplifies the
526 	 * code, and it doesn't cost us much memory as the memory usage is
527 	 * dominated by buffers that hold the actual btree node data and those
528 	 * can be freed - and the number of struct btrees allocated is
529 	 * effectively bounded.
530 	 *
531 	 * btree_cache_freeable effectively is a small cache - we use it because
532 	 * high order page allocations can be rather expensive, and it's quite
533 	 * common to delete and allocate btree nodes in quick succession. It
534 	 * should never grow past ~2-3 nodes in practice.
535 	 */
536 	struct list_head	btree_cache;
537 	struct list_head	btree_cache_freeable;
538 	struct list_head	btree_cache_freed;
539 
540 	/* Number of elements in btree_cache + btree_cache_freeable lists */
541 	unsigned		btree_cache_used;
542 
543 	/*
544 	 * If we need to allocate memory for a new btree node and that
545 	 * allocation fails, we can cannibalize another node in the btree cache
546 	 * to satisfy the allocation - lock to guarantee only one thread does
547 	 * this at a time:
548 	 */
549 	wait_queue_head_t	btree_cache_wait;
550 	struct task_struct	*btree_cache_alloc_lock;
551 
552 	/*
553 	 * When we free a btree node, we increment the gen of the bucket the
554 	 * node is in - but we can't rewrite the prios and gens until we've
555 	 * finished whatever it is we were doing; otherwise, after a crash the
556 	 * btree node would be freed but (for, say, a split) we might not have
557 	 * the pointers to the new nodes inserted into the btree yet.
558 	 *
559 	 * This is a refcount that blocks prio_write() until the new keys are
560 	 * written.
561 	 */
562 	atomic_t		prio_blocked;
563 	wait_queue_head_t	bucket_wait;
564 
565 	/*
566 	 * For any bio we don't skip we subtract the number of sectors from
567 	 * rescale; when it hits 0 we rescale all the bucket priorities.
568 	 */
569 	atomic_t		rescale;
570 	/*
571 	 * When we invalidate buckets, we use both the priority and the amount
572 	 * of good data to determine which buckets to reuse first - to weight
573 	 * those together consistently we keep track of the smallest nonzero
574 	 * priority of any bucket.
575 	 */
576 	uint16_t		min_prio;
577 
578 	/*
579 	 * max(gen - last_gc) for all buckets. When it gets too big we have to gc
580 	 * to keep gens from wrapping around.
581 	 */
582 	uint8_t			need_gc;
583 	struct gc_stat		gc_stats;
584 	size_t			nbuckets;
585 
586 	struct task_struct	*gc_thread;
587 	/* Where in the btree gc currently is */
588 	struct bkey		gc_done;
589 
590 	/*
591 	 * The allocation code needs gc_mark in struct bucket to be correct, but
592 	 * it's not while a gc is in progress. Protected by bucket_lock.
593 	 */
594 	int			gc_mark_valid;
595 
596 	/* Counts how many sectors bio_insert has added to the cache */
597 	atomic_t		sectors_to_gc;
598 	wait_queue_head_t	gc_wait;
599 
600 	struct keybuf		moving_gc_keys;
601 	/* Number of moving GC bios in flight */
602 	struct semaphore	moving_in_flight;
603 
604 	struct workqueue_struct	*moving_gc_wq;
605 
606 	struct btree		*root;
607 
608 #ifdef CONFIG_BCACHE_DEBUG
609 	struct btree		*verify_data;
610 	struct bset		*verify_ondisk;
611 	struct mutex		verify_lock;
612 #endif
613 
614 	unsigned		nr_uuids;
615 	struct uuid_entry	*uuids;
616 	BKEY_PADDED(uuid_bucket);
617 	struct closure		uuid_write;
618 	struct semaphore	uuid_write_mutex;
619 
620 	/*
621 	 * A btree node on disk could have too many bsets for an iterator to fit
622 	 * on the stack - have to dynamically allocate them
623 	 */
624 	mempool_t		*fill_iter;
625 
626 	struct bset_sort_state	sort;
627 
628 	/* List of buckets we're currently writing data to */
629 	struct list_head	data_buckets;
630 	spinlock_t		data_bucket_lock;
631 
632 	struct journal		journal;
633 
634 #define CONGESTED_MAX		1024
635 	unsigned		congested_last_us;
636 	atomic_t		congested;
637 
638 	/* The rest of this all shows up in sysfs */
639 	unsigned		congested_read_threshold_us;
640 	unsigned		congested_write_threshold_us;
641 
642 	struct time_stats	btree_gc_time;
643 	struct time_stats	btree_split_time;
644 	struct time_stats	btree_read_time;
645 
646 	atomic_long_t		cache_read_races;
647 	atomic_long_t		writeback_keys_done;
648 	atomic_long_t		writeback_keys_failed;
649 
650 	enum			{
651 		ON_ERROR_UNREGISTER,
652 		ON_ERROR_PANIC,
653 	}			on_error;
654 	unsigned		error_limit;
655 	unsigned		error_decay;
656 
657 	unsigned short		journal_delay_ms;
658 	bool			expensive_debug_checks;
659 	unsigned		verify:1;
660 	unsigned		key_merging_disabled:1;
661 	unsigned		gc_always_rewrite:1;
662 	unsigned		shrinker_disabled:1;
663 	unsigned		copy_gc_enabled:1;
664 
665 #define BUCKET_HASH_BITS	12
666 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
667 };
668 
669 struct bbio {
670 	unsigned		submit_time_us;
671 	union {
672 		struct bkey	key;
673 		uint64_t	_pad[3];
674 		/*
675 		 * We only need pad = 3 here because we only ever carry around a
676 		 * single pointer - i.e. the pointer we're doing io to/from.
677 		 */
678 	};
679 	struct bio		bio;
680 };
681 
682 #define BTREE_PRIO		USHRT_MAX
683 #define INITIAL_PRIO		32768U
684 
685 #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
686 #define btree_blocks(b)							\
687 	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
688 
689 #define btree_default_blocks(c)						\
690 	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
691 
692 #define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
693 #define bucket_bytes(c)		((c)->sb.bucket_size << 9)
694 #define block_bytes(c)		((c)->sb.block_size << 9)
695 
696 #define prios_per_bucket(c)				\
697 	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
698 	 sizeof(struct bucket_disk))
699 #define prio_buckets(c)					\
700 	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
701 
702 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
703 {
704 	return s >> c->bucket_bits;
705 }
706 
707 static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
708 {
709 	return ((sector_t) b) << c->bucket_bits;
710 }
711 
712 static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
713 {
714 	return s & (c->sb.bucket_size - 1);
715 }
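
/*
 * Illustrative sketch (not part of the original header): splitting an absolute
 * sector number into the bucket it lands in and the offset within that bucket
 * using the helpers above. example_bucket_pos() is a hypothetical name.
 */
static inline void example_bucket_pos(struct cache_set *c, sector_t s,
				      size_t *bucket, sector_t *offset)
{
	*bucket = sector_to_bucket(c, s);	/* s >> bucket_bits */
	*offset = bucket_remainder(c, s);	/* s & (bucket_size - 1) */
}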
716 
717 static inline struct cache *PTR_CACHE(struct cache_set *c,
718 				      const struct bkey *k,
719 				      unsigned ptr)
720 {
721 	return c->cache[PTR_DEV(k, ptr)];
722 }
723 
724 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
725 				   const struct bkey *k,
726 				   unsigned ptr)
727 {
728 	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
729 }
730 
731 static inline struct bucket *PTR_BUCKET(struct cache_set *c,
732 					const struct bkey *k,
733 					unsigned ptr)
734 {
735 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
736 }
737 
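/*
 * Returns how far gen a is ahead of gen b in 8 bit modular arithmetic, clamped
 * to 0 if a appears to be behind b (difference over 128) - see the
 * BUCKETS/ALLOCATION notes at the top of this file.
 */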
738 static inline uint8_t gen_after(uint8_t a, uint8_t b)
739 {
740 	uint8_t r = a - b;
741 	return r > 128U ? 0 : r;
742 }
743 
744 static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
745 				unsigned i)
746 {
747 	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
748 }
749 
750 static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
751 				 unsigned i)
752 {
753 	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
754 }
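
/*
 * Illustrative sketch (not part of the original header): how a lookup path
 * might combine the two checks above before following a pointer.
 * example_ptr_usable() is a hypothetical name, not an existing bcache helper.
 */
static inline bool example_ptr_usable(struct cache_set *c, const struct bkey *k,
				      unsigned i)
{
	/* device id in range and the cache device present... */
	if (!ptr_available(c, k, i))
		return false;

	/* ...and the bucket hasn't been reused since the pointer was written */
	return !ptr_stale(c, k, i);
}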
755 
756 /* Btree key macros */
757 
758 /*
759  * This is used for various on disk data structures - cache_sb, prio_set, bset,
760  * jset: The checksum is _always_ the first 8 bytes of these structs
761  */
762 #define csum_set(i)							\
763 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
764 		  ((void *) bset_bkey_last(i)) -			\
765 		  (((void *) (i)) + sizeof(uint64_t)))
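
/*
 * Typical use, as a sketch: after reading one of those structs back from disk,
 * recompute the checksum over everything past the csum field and compare it
 * against the stored value, e.g.
 *
 *	if (i->csum != csum_set(i))
 *		the data is corrupt (or the write was torn) - error out
 */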
766 
767 /* Error handling macros */
768 
769 #define btree_bug(b, ...)						\
770 do {									\
771 	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
772 		dump_stack();						\
773 } while (0)
774 
775 #define cache_bug(c, ...)						\
776 do {									\
777 	if (bch_cache_set_error(c, __VA_ARGS__))			\
778 		dump_stack();						\
779 } while (0)
780 
781 #define btree_bug_on(cond, b, ...)					\
782 do {									\
783 	if (cond)							\
784 		btree_bug(b, __VA_ARGS__);				\
785 } while (0)
786 
787 #define cache_bug_on(cond, c, ...)					\
788 do {									\
789 	if (cond)							\
790 		cache_bug(c, __VA_ARGS__);				\
791 } while (0)
792 
793 #define cache_set_err_on(cond, c, ...)					\
794 do {									\
795 	if (cond)							\
796 		bch_cache_set_error(c, __VA_ARGS__);			\
797 } while (0)
798 
799 /* Looping macros */
800 
801 #define for_each_cache(ca, cs, iter)					\
802 	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
803 
804 #define for_each_bucket(b, ca)						\
805 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
806 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
807 
808 static inline void cached_dev_put(struct cached_dev *dc)
809 {
810 	if (atomic_dec_and_test(&dc->count))
811 		schedule_work(&dc->detach);
812 }
813 
814 static inline bool cached_dev_get(struct cached_dev *dc)
815 {
816 	if (!atomic_inc_not_zero(&dc->count))
817 		return false;
818 
819 	/* Paired with the mb in cached_dev_attach */
820 	smp_mb__after_atomic();
821 	return true;
822 }
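
/*
 * Illustrative only (not part of the original header): the intended pattern
 * for the get/put pair above - take a ref before relying on the attached cache
 * set, drop it when done; the final put schedules the detach work.
 * example_do_cached_io() is a hypothetical name.
 */
static inline bool example_do_cached_io(struct cached_dev *dc)
{
	if (!cached_dev_get(dc))
		return false;		/* not attached / mid-detach */

	/* ... do work that relies on dc->disk.c being valid ... */

	cached_dev_put(dc);
	return true;
}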
823 
824 /*
825  * bucket_gc_gen() returns the difference between the bucket's current gen and
826  * the oldest gen of any pointer into that bucket in the btree (last_gc).
827  */
828 
829 static inline uint8_t bucket_gc_gen(struct bucket *b)
830 {
831 	return b->gen - b->last_gc;
832 }
833 
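/*
 * Once bucket_gc_gen() gets this far ahead, the bucket can't be invalidated
 * again until gc has run and updated last_gc - this is what keeps the 8 bit
 * gen from wrapping past stale pointers still sitting in the btree.
 */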
834 #define BUCKET_GC_GEN_MAX	96U
835 
836 #define kobj_attribute_write(n, fn)					\
837 	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
838 
839 #define kobj_attribute_rw(n, show, store)				\
840 	static struct kobj_attribute ksysfs_##n =			\
841 		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
842 
843 static inline void wake_up_allocators(struct cache_set *c)
844 {
845 	struct cache *ca;
846 	unsigned i;
847 
848 	for_each_cache(ca, c, i)
849 		wake_up_process(ca->alloc_thread);
850 }
851 
852 /* Forward declarations */
853 
854 void bch_count_io_errors(struct cache *, blk_status_t, const char *);
855 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
856 			      blk_status_t, const char *);
857 void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
858 		const char *);
859 void bch_bbio_free(struct bio *, struct cache_set *);
860 struct bio *bch_bbio_alloc(struct cache_set *);
861 
862 void __bch_submit_bbio(struct bio *, struct cache_set *);
863 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
864 
865 uint8_t bch_inc_gen(struct cache *, struct bucket *);
866 void bch_rescale_priorities(struct cache_set *, int);
867 
868 bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
869 void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
870 
871 void __bch_bucket_free(struct cache *, struct bucket *);
872 void bch_bucket_free(struct cache_set *, struct bkey *);
873 
874 long bch_bucket_alloc(struct cache *, unsigned, bool);
875 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
876 			   struct bkey *, int, bool);
877 int bch_bucket_alloc_set(struct cache_set *, unsigned,
878 			 struct bkey *, int, bool);
879 bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
880 		       unsigned, unsigned, bool);
881 
882 __printf(2, 3)
883 bool bch_cache_set_error(struct cache_set *, const char *, ...);
884 
885 void bch_prio_write(struct cache *);
886 void bch_write_bdev_super(struct cached_dev *, struct closure *);
887 
888 extern struct workqueue_struct *bcache_wq;
889 extern const char * const bch_cache_modes[];
890 extern struct mutex bch_register_lock;
891 extern struct list_head bch_cache_sets;
892 
893 extern struct kobj_type bch_cached_dev_ktype;
894 extern struct kobj_type bch_flash_dev_ktype;
895 extern struct kobj_type bch_cache_set_ktype;
896 extern struct kobj_type bch_cache_set_internal_ktype;
897 extern struct kobj_type bch_cache_ktype;
898 
899 void bch_cached_dev_release(struct kobject *);
900 void bch_flash_dev_release(struct kobject *);
901 void bch_cache_set_release(struct kobject *);
902 void bch_cache_release(struct kobject *);
903 
904 int bch_uuid_write(struct cache_set *);
905 void bcache_write_super(struct cache_set *);
906 
907 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
908 
909 int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
910 void bch_cached_dev_detach(struct cached_dev *);
911 void bch_cached_dev_run(struct cached_dev *);
912 void bcache_device_stop(struct bcache_device *);
913 
914 void bch_cache_set_unregister(struct cache_set *);
915 void bch_cache_set_stop(struct cache_set *);
916 
917 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
918 void bch_btree_cache_free(struct cache_set *);
919 int bch_btree_cache_alloc(struct cache_set *);
920 void bch_moving_init_cache_set(struct cache_set *);
921 int bch_open_buckets_alloc(struct cache_set *);
922 void bch_open_buckets_free(struct cache_set *);
923 
924 int bch_cache_allocator_start(struct cache *ca);
925 
926 void bch_debug_exit(void);
927 int bch_debug_init(struct kobject *);
928 void bch_request_exit(void);
929 int bch_request_init(void);
930 
931 #endif /* _BCACHE_H */
932