xref: /linux/include/linux/blockgroup_lock.h (revision ab520be8cd5d56867fc95cfbc34b90880faf1f9d)
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP
/* Scale the number of hashed locks with NR_CPUS, capped at 4 << ilog2(32) == 128. */
#define NR_BG_LOCKS	(4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
#define NR_BG_LOCKS	1
#endif

struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

/* Initialize every hashed per-blockgroup spinlock. */
static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

/*
 * Return the spinlock covering @block_group: the group number is hashed
 * into the lock array by masking with the power-of-two NR_BG_LOCKS.
 */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
	return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}

#endif	/* _LINUX_BLOCKGROUP_LOCK_H */
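
/*
 * Illustrative usage sketch (an assumption for illustration, not part of this
 * header or of any in-tree filesystem): a hypothetical filesystem keeps a
 * struct blockgroup_lock in its private superblock info, initializes it once
 * at mount time with bgl_lock_init(), and takes the hashed per-group spinlock
 * via bgl_lock_ptr() around updates to one block group's in-core state.
 * The names example_sb_info, example_fill_super and example_update_group are
 * made up; real users such as ext2/ext4 wrap bgl_lock_ptr() in a similar
 * small helper in their own headers.
 */
#include <linux/blockgroup_lock.h>
#include <linux/spinlock.h>

struct example_sb_info {
	struct blockgroup_lock s_blockgroup_lock;	/* hashed per-group locks */
};

/* Called once while setting up the superblock. */
static void example_fill_super(struct example_sb_info *sbi)
{
	bgl_lock_init(&sbi->s_blockgroup_lock);
}

/* Serialize modifications to one block group's counters. */
static void example_update_group(struct example_sb_info *sbi,
				 unsigned int block_group)
{
	spinlock_t *lock = bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);

	spin_lock(lock);
	/* ... update this block group's in-core state under the lock ... */
	spin_unlock(lock);
}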