1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmglue.c
5  *
6  * Code which implements an OCFS2 specific interface to our DLM.
7  *
8  * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  */
25 
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
29 #include <linux/mm.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
36 
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
39 
40 #include "ocfs2.h"
41 #include "ocfs2_lockingver.h"
42 
43 #include "alloc.h"
44 #include "dcache.h"
45 #include "dlmglue.h"
46 #include "extent_map.h"
47 #include "file.h"
48 #include "heartbeat.h"
49 #include "inode.h"
50 #include "journal.h"
51 #include "stackglue.h"
52 #include "slot_map.h"
53 #include "super.h"
54 #include "uptodate.h"
55 #include "quota.h"
56 #include "refcounttree.h"
57 
58 #include "buffer_head_io.h"
59 
60 struct ocfs2_mask_waiter {
61 	struct list_head	mw_item;
62 	int			mw_status;
63 	struct completion	mw_complete;
64 	unsigned long		mw_mask;
65 	unsigned long		mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 	ktime_t			mw_lock_start;
68 #endif
69 };
70 
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
75 
76 /*
77  * Return value from ->downconvert_worker functions.
78  *
79  * These control the precise actions of ocfs2_unblock_lock()
80  * and ocfs2_process_blocked_lock()
81  *
82  */
83 enum ocfs2_unblock_action {
84 	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
85 	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
86 				      * ->post_unlock callback */
87 	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
88 				      * ->post_unlock() callback. */
89 };
90 
91 struct ocfs2_unblock_ctl {
92 	int requeue;
93 	enum ocfs2_unblock_action unblock_action;
94 };
95 
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
98 
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
100 					int new_level);
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
102 
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
104 				     int blocking);
105 
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
107 				       int blocking);
108 
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 				     struct ocfs2_lock_res *lockres);
111 
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
113 
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
115 					    int new_level);
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
117 					 int blocking);
118 
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
120 
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 				     const char *function,
124 				     unsigned int line,
125 				     struct ocfs2_lock_res *lockres)
126 {
127 	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
128 
129 	mlog(level, "LVB information for %s (called from %s:%u):\n",
130 	     lockres->l_name, function, line);
131 	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 	     be32_to_cpu(lvb->lvb_igeneration));
134 	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 	     be16_to_cpu(lvb->lvb_imode));
138 	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 	     be32_to_cpu(lvb->lvb_iattr));
144 }
145 
146 
147 /*
148  * OCFS2 Lock Resource Operations
149  *
150  * These fine tune the behavior of the generic dlmglue locking infrastructure.
151  *
152  * The most basic of lock types can point ->l_priv to their respective
153  * struct ocfs2_super and allow the default actions to manage things.
154  *
155  * Right now, each lock type also needs to implement an init function,
156  * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
157  * should be called when the lock is no longer needed (i.e., object
158  * destruction time).
159  */
160 struct ocfs2_lock_res_ops {
161 	/*
162 	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 	 * this callback if ->l_priv is not an ocfs2_super pointer
164 	 */
165 	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
166 
167 	/*
168 	 * Optionally called in the downconvert thread after a
169 	 * successful downconvert. The lockres will not be referenced
170 	 * after this callback is called, so it is safe to free
171 	 * memory, etc.
172 	 *
173 	 * The exact semantics of when this is called are controlled
174 	 * by ->downconvert_worker()
175 	 */
176 	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
177 
178 	/*
179 	 * Allow a lock type to add checks to determine whether it is
180 	 * safe to downconvert a lock. Return 0 to re-queue the
181 	 * downconvert at a later time, nonzero to continue.
182 	 *
183 	 * For most locks, the default checks that there are no
184 	 * incompatible holders are sufficient.
185 	 *
186 	 * Called with the lockres spinlock held.
187 	 */
188 	int (*check_downconvert)(struct ocfs2_lock_res *, int);
189 
190 	/*
191 	 * Allows a lock type to populate the lock value block. This
192 	 * is called on downconvert, and when we drop a lock.
193 	 *
194 	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 	 * in the flags field.
196 	 *
197 	 * Called with the lockres spinlock held.
198 	 */
199 	void (*set_lvb)(struct ocfs2_lock_res *);
200 
201 	/*
202 	 * Called from the downconvert thread when it is determined
203 	 * that a lock will be downconverted. This is called without
204 	 * any locks held so the function can do work that might
205 	 * schedule (syncing out data, etc).
206 	 *
207 	 * This should return any one of the ocfs2_unblock_action
208 	 * values, depending on what it wants the thread to do.
209 	 */
210 	int (*downconvert_worker)(struct ocfs2_lock_res *, int);
211 
212 	/*
213 	 * LOCK_TYPE_* flags which describe the specific requirements
214 	 * of a lock type. Descriptions of each individual flag follow.
215 	 */
216 	int flags;
217 };
218 
219 /*
220  * Some locks want to "refresh" potentially stale data when a
221  * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222  * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223  * individual lockres l_flags member from the ast function. It is
224  * expected that the locking wrapper will clear the
225  * OCFS2_LOCK_NEEDS_REFRESH flag when done.
226  */
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
228 
229 /*
230  * Indicate that a lock type makes use of the lock value block. The
231  * ->set_lvb lock type callback must be defined.
232  */
233 #define LOCK_TYPE_USES_LVB		0x2
234 
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 	.get_osb	= ocfs2_get_inode_osb,
237 	.flags		= 0,
238 };
239 
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 	.get_osb	= ocfs2_get_inode_osb,
242 	.check_downconvert = ocfs2_check_meta_downconvert,
243 	.set_lvb	= ocfs2_set_meta_lvb,
244 	.downconvert_worker = ocfs2_data_convert_worker,
245 	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
246 };
247 
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
250 };
251 
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
253 	.flags		= 0,
254 };
255 
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
257 	.flags		= 0,
258 };
259 
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
262 };
263 
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 	.get_osb	= ocfs2_get_dentry_osb,
266 	.post_unlock	= ocfs2_dentry_post_unlock,
267 	.downconvert_worker = ocfs2_dentry_convert_worker,
268 	.flags		= 0,
269 };
270 
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 	.get_osb	= ocfs2_get_inode_osb,
273 	.flags		= 0,
274 };
275 
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 	.get_osb	= ocfs2_get_file_osb,
278 	.flags		= 0,
279 };
280 
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 	.set_lvb	= ocfs2_set_qinfo_lvb,
283 	.get_osb	= ocfs2_get_qinfo_osb,
284 	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
285 };
286 
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 	.check_downconvert = ocfs2_check_refcount_downconvert,
289 	.downconvert_worker = ocfs2_refcount_convert_worker,
290 	.flags		= 0,
291 };
292 
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
294 {
295 	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
298 }
299 
300 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
301 {
302 	return container_of(lksb, struct ocfs2_lock_res, l_lksb);
303 }
304 
305 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
306 {
307 	BUG_ON(!ocfs2_is_inode_lock(lockres));
308 
309 	return (struct inode *) lockres->l_priv;
310 }
311 
312 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
313 {
314 	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
315 
316 	return (struct ocfs2_dentry_lock *)lockres->l_priv;
317 }
318 
319 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
320 {
321 	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
322 
323 	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
324 }
325 
326 static inline struct ocfs2_refcount_tree *
327 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
328 {
329 	return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
330 }
331 
332 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
333 {
334 	if (lockres->l_ops->get_osb)
335 		return lockres->l_ops->get_osb(lockres);
336 
337 	return (struct ocfs2_super *)lockres->l_priv;
338 }
339 
340 static int ocfs2_lock_create(struct ocfs2_super *osb,
341 			     struct ocfs2_lock_res *lockres,
342 			     int level,
343 			     u32 dlm_flags);
344 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
345 						     int wanted);
346 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
347 				   struct ocfs2_lock_res *lockres,
348 				   int level, unsigned long caller_ip);
349 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
350 					struct ocfs2_lock_res *lockres,
351 					int level)
352 {
353 	__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
354 }
355 
356 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
359 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
360 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
361 					struct ocfs2_lock_res *lockres);
362 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
363 						int convert);
364 #define ocfs2_log_dlm_error(_func, _err, _lockres) do {					\
365 	if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY)				\
366 		mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",	\
367 		     _err, _func, (_lockres)->l_name);				\
368 	else										\
369 		mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n",	\
370 		     _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name,	\
371 		     (unsigned int)ocfs2_get_dentry_lock_ino(_lockres));		\
372 } while (0)
373 static int ocfs2_downconvert_thread(void *arg);
374 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
375 					struct ocfs2_lock_res *lockres);
376 static int ocfs2_inode_lock_update(struct inode *inode,
377 				  struct buffer_head **bh);
378 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
379 static inline int ocfs2_highest_compat_lock_level(int level);
380 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
381 					      int new_level);
382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
383 				  struct ocfs2_lock_res *lockres,
384 				  int new_level,
385 				  int lvb,
386 				  unsigned int generation);
387 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
388 				        struct ocfs2_lock_res *lockres);
389 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
390 				struct ocfs2_lock_res *lockres);
391 
392 
393 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
394 				  u64 blkno,
395 				  u32 generation,
396 				  char *name)
397 {
398 	int len;
399 
400 	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
401 
402 	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
403 		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
404 		       (long long)blkno, generation);
405 
406 	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
407 
408 	mlog(0, "built lock resource with name: %s\n", name);
409 }
410 
411 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
412 
413 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
414 				       struct ocfs2_dlm_debug *dlm_debug)
415 {
416 	mlog(0, "Add tracking for lockres %s\n", res->l_name);
417 
418 	spin_lock(&ocfs2_dlm_tracking_lock);
419 	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
420 	spin_unlock(&ocfs2_dlm_tracking_lock);
421 }
422 
423 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
424 {
425 	spin_lock(&ocfs2_dlm_tracking_lock);
426 	if (!list_empty(&res->l_debug_list))
427 		list_del_init(&res->l_debug_list);
428 	spin_unlock(&ocfs2_dlm_tracking_lock);
429 }
430 
431 #ifdef CONFIG_OCFS2_FS_STATS
432 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
433 {
434 	res->l_lock_refresh = 0;
435 	memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 	memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
437 }
438 
439 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
440 				    struct ocfs2_mask_waiter *mw, int ret)
441 {
442 	u32 usec;
443 	ktime_t kt;
444 	struct ocfs2_lock_stats *stats;
445 
446 	if (level == LKM_PRMODE)
447 		stats = &res->l_lock_prmode;
448 	else if (level == LKM_EXMODE)
449 		stats = &res->l_lock_exmode;
450 	else
451 		return;
452 
453 	kt = ktime_sub(ktime_get(), mw->mw_lock_start);
454 	usec = ktime_to_us(kt);
455 
456 	stats->ls_gets++;
457 	stats->ls_total += ktime_to_ns(kt);
458 	/* overflow */
459 	if (unlikely(stats->ls_gets == 0)) {
460 		stats->ls_gets++;
461 		stats->ls_total = ktime_to_ns(kt);
462 	}
463 
464 	if (stats->ls_max < usec)
465 		stats->ls_max = usec;
466 
467 	if (ret)
468 		stats->ls_fail++;
469 }
470 
471 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
472 {
473 	lockres->l_lock_refresh++;
474 }
475 
476 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
477 {
478 	mw->mw_lock_start = ktime_get();
479 }
480 #else
481 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
482 {
483 }
484 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
485 			   int level, struct ocfs2_mask_waiter *mw, int ret)
486 {
487 }
488 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
489 {
490 }
491 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
492 {
493 }
494 #endif
495 
496 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
497 				       struct ocfs2_lock_res *res,
498 				       enum ocfs2_lock_type type,
499 				       struct ocfs2_lock_res_ops *ops,
500 				       void *priv)
501 {
502 	res->l_type          = type;
503 	res->l_ops           = ops;
504 	res->l_priv          = priv;
505 
506 	res->l_level         = DLM_LOCK_IV;
507 	res->l_requested     = DLM_LOCK_IV;
508 	res->l_blocking      = DLM_LOCK_IV;
509 	res->l_action        = OCFS2_AST_INVALID;
510 	res->l_unlock_action = OCFS2_UNLOCK_INVALID;
511 
512 	res->l_flags         = OCFS2_LOCK_INITIALIZED;
513 
514 	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
515 
516 	ocfs2_init_lock_stats(res);
517 #ifdef CONFIG_DEBUG_LOCK_ALLOC
518 	if (type != OCFS2_LOCK_TYPE_OPEN)
519 		lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
520 				 &lockdep_keys[type], 0);
521 	else
522 		res->l_lockdep_map.key = NULL;
523 #endif
524 }
525 
526 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
527 {
528 	/* This also clears out the lock status block */
529 	memset(res, 0, sizeof(struct ocfs2_lock_res));
530 	spin_lock_init(&res->l_lock);
531 	init_waitqueue_head(&res->l_event);
532 	INIT_LIST_HEAD(&res->l_blocked_list);
533 	INIT_LIST_HEAD(&res->l_mask_waiters);
534 }
535 
536 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
537 			       enum ocfs2_lock_type type,
538 			       unsigned int generation,
539 			       struct inode *inode)
540 {
541 	struct ocfs2_lock_res_ops *ops;
542 
543 	switch(type) {
544 		case OCFS2_LOCK_TYPE_RW:
545 			ops = &ocfs2_inode_rw_lops;
546 			break;
547 		case OCFS2_LOCK_TYPE_META:
548 			ops = &ocfs2_inode_inode_lops;
549 			break;
550 		case OCFS2_LOCK_TYPE_OPEN:
551 			ops = &ocfs2_inode_open_lops;
552 			break;
553 		default:
554 			mlog_bug_on_msg(1, "type: %d\n", type);
555 			ops = NULL; /* thanks, gcc */
556 			break;
557 	}
558 
559 	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
560 			      generation, res->l_name);
561 	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
562 }
563 
564 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
565 {
566 	struct inode *inode = ocfs2_lock_res_inode(lockres);
567 
568 	return OCFS2_SB(inode->i_sb);
569 }
570 
571 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
572 {
573 	struct ocfs2_mem_dqinfo *info = lockres->l_priv;
574 
575 	return OCFS2_SB(info->dqi_gi.dqi_sb);
576 }
577 
578 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
579 {
580 	struct ocfs2_file_private *fp = lockres->l_priv;
581 
582 	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
583 }
584 
585 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
586 {
587 	__be64 inode_blkno_be;
588 
589 	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
590 	       sizeof(__be64));
591 
592 	return be64_to_cpu(inode_blkno_be);
593 }
594 
595 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
596 {
597 	struct ocfs2_dentry_lock *dl = lockres->l_priv;
598 
599 	return OCFS2_SB(dl->dl_inode->i_sb);
600 }
601 
602 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
603 				u64 parent, struct inode *inode)
604 {
605 	int len;
606 	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
607 	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
608 	struct ocfs2_lock_res *lockres = &dl->dl_lockres;
609 
610 	ocfs2_lock_res_init_once(lockres);
611 
612 	/*
613 	 * Unfortunately, the standard lock naming scheme won't work
614 	 * here because we have two 16 byte values to use. Instead,
615 	 * we'll stuff the inode number as a binary value. We still
616 	 * want error prints to show something without garbling the
617 	 * display, so drop a null byte in there before the inode
618 	 * number. A future version of OCFS2 will likely use all
619 	 * binary lock names. The stringified names have been a
620 	 * tremendous aid in debugging, but now that the debugfs
621 	 * interface exists, we can mangle things there if need be.
622 	 *
623 	 * NOTE: We also drop the standard "pad" value (the total lock
624 	 * name size stays the same though - the last part is all
625 	 * zeros due to the memset in ocfs2_lock_res_init_once()
626 	 */
627 	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
628 		       "%c%016llx",
629 		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
630 		       (long long)parent);
631 
632 	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
633 
634 	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
635 	       sizeof(__be64));
636 
637 	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
638 				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
639 				   dl);
640 }
641 
642 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
643 				      struct ocfs2_super *osb)
644 {
645 	/* Superblock lockres doesn't come from a slab so we call init
646 	 * once on it manually.  */
647 	ocfs2_lock_res_init_once(res);
648 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
649 			      0, res->l_name);
650 	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
651 				   &ocfs2_super_lops, osb);
652 }
653 
654 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
655 				       struct ocfs2_super *osb)
656 {
657 	/* Rename lockres doesn't come from a slab so we call init
658 	 * once on it manually.  */
659 	ocfs2_lock_res_init_once(res);
660 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
661 	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
662 				   &ocfs2_rename_lops, osb);
663 }
664 
665 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
666 					 struct ocfs2_super *osb)
667 {
668 	/* nfs_sync lockres doesn't come from a slab so we call init
669 	 * once on it manually.  */
670 	ocfs2_lock_res_init_once(res);
671 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
672 	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
673 				   &ocfs2_nfs_sync_lops, osb);
674 }
675 
676 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
677 					    struct ocfs2_super *osb)
678 {
679 	ocfs2_lock_res_init_once(res);
680 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
681 	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
682 				   &ocfs2_orphan_scan_lops, osb);
683 }
684 
685 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
686 			      struct ocfs2_file_private *fp)
687 {
688 	struct inode *inode = fp->fp_file->f_mapping->host;
689 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
690 
691 	ocfs2_lock_res_init_once(lockres);
692 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
693 			      inode->i_generation, lockres->l_name);
694 	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
695 				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
696 				   fp);
697 	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
698 }
699 
700 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
701 			       struct ocfs2_mem_dqinfo *info)
702 {
703 	ocfs2_lock_res_init_once(lockres);
704 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
705 			      0, lockres->l_name);
706 	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
707 				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
708 				   info);
709 }
710 
711 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
712 				  struct ocfs2_super *osb, u64 ref_blkno,
713 				  unsigned int generation)
714 {
715 	ocfs2_lock_res_init_once(lockres);
716 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
717 			      generation, lockres->l_name);
718 	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
719 				   &ocfs2_refcount_block_lops, osb);
720 }
721 
722 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
723 {
724 	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
725 		return;
726 
727 	ocfs2_remove_lockres_tracking(res);
728 
729 	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
730 			"Lockres %s is on the blocked list\n",
731 			res->l_name);
732 	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
733 			"Lockres %s has mask waiters pending\n",
734 			res->l_name);
735 	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
736 			"Lockres %s is locked\n",
737 			res->l_name);
738 	mlog_bug_on_msg(res->l_ro_holders,
739 			"Lockres %s has %u ro holders\n",
740 			res->l_name, res->l_ro_holders);
741 	mlog_bug_on_msg(res->l_ex_holders,
742 			"Lockres %s has %u ex holders\n",
743 			res->l_name, res->l_ex_holders);
744 
745 	/* Need to clear out the lock status block for the dlm */
746 	memset(&res->l_lksb, 0, sizeof(res->l_lksb));
747 
748 	res->l_flags = 0UL;
749 }
750 
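/*
 * Local holder accounting: l_ro_holders counts PR holders and l_ex_holders
 * counts EX holders on this node.  Callers in this file update the counts
 * with the lockres spinlock held.
 */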
751 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
752 				     int level)
753 {
754 	BUG_ON(!lockres);
755 
756 	switch(level) {
757 	case DLM_LOCK_EX:
758 		lockres->l_ex_holders++;
759 		break;
760 	case DLM_LOCK_PR:
761 		lockres->l_ro_holders++;
762 		break;
763 	default:
764 		BUG();
765 	}
766 }
767 
768 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
769 				     int level)
770 {
771 	BUG_ON(!lockres);
772 
773 	switch(level) {
774 	case DLM_LOCK_EX:
775 		BUG_ON(!lockres->l_ex_holders);
776 		lockres->l_ex_holders--;
777 		break;
778 	case DLM_LOCK_PR:
779 		BUG_ON(!lockres->l_ro_holders);
780 		lockres->l_ro_holders--;
781 		break;
782 	default:
783 		BUG();
784 	}
785 }
786 
787 /* WARNING: This function lives in a world where the only three lock
788  * levels are EX, PR, and NL. It *will* have to be adjusted when more
789  * lock levels are added. */
790 static inline int ocfs2_highest_compat_lock_level(int level)
791 {
792 	int new_level = DLM_LOCK_EX;
793 
794 	if (level == DLM_LOCK_EX)
795 		new_level = DLM_LOCK_NL;
796 	else if (level == DLM_LOCK_PR)
797 		new_level = DLM_LOCK_PR;
798 	return new_level;
799 }
800 
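/*
 * Flag helpers.  lockres_set_flags() installs the new flag word and
 * completes any mask waiters whose (mask, goal) condition is now
 * satisfied.  The lockres spinlock must be held.
 */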
801 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
802 			      unsigned long newflags)
803 {
804 	struct ocfs2_mask_waiter *mw, *tmp;
805 
806 	assert_spin_locked(&lockres->l_lock);
807 
808 	lockres->l_flags = newflags;
809 
810 	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
811 		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
812 			continue;
813 
814 		list_del_init(&mw->mw_item);
815 		mw->mw_status = 0;
816 		complete(&mw->mw_complete);
817 	}
818 }
819 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
820 {
821 	lockres_set_flags(lockres, lockres->l_flags | or);
822 }
823 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
824 				unsigned long clear)
825 {
826 	lockres_set_flags(lockres, lockres->l_flags & ~clear);
827 }
828 
829 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
830 {
831 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
832 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
833 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
834 	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
835 
836 	lockres->l_level = lockres->l_requested;
837 	if (lockres->l_level <=
838 	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
839 		lockres->l_blocking = DLM_LOCK_NL;
840 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
841 	}
842 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
843 }
844 
845 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
846 {
847 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
848 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
849 
850 	/* Convert from RO to EX doesn't really need anything as our
851 	 * information is already up to date. Convert from NL to
852 	 * *anything*, however, should mark ourselves as needing an
853 	 * update */
854 	if (lockres->l_level == DLM_LOCK_NL &&
855 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
856 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
857 
858 	lockres->l_level = lockres->l_requested;
859 
860 	/*
861 	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
862 	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
863 	 * downconverting the lock before the upconvert has fully completed.
864 	 */
865 	lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
866 
867 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
868 }
869 
870 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
871 {
872 	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
873 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
874 
875 	if (lockres->l_requested > DLM_LOCK_NL &&
876 	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
877 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
878 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
879 
880 	lockres->l_level = lockres->l_requested;
881 	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
882 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
883 }
884 
885 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
886 				     int level)
887 {
888 	int needs_downconvert = 0;
889 
890 	assert_spin_locked(&lockres->l_lock);
891 
892 	if (level > lockres->l_blocking) {
893 		/* only schedule a downconvert if we haven't already scheduled
894 		 * one that goes low enough to satisfy the level we're
895 		 * blocking.  this also catches the case where we get
896 		 * duplicate BASTs */
897 		if (ocfs2_highest_compat_lock_level(level) <
898 		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
899 			needs_downconvert = 1;
900 
901 		lockres->l_blocking = level;
902 	}
903 
904 	mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
905 	     lockres->l_name, level, lockres->l_level, lockres->l_blocking,
906 	     needs_downconvert);
907 
908 	if (needs_downconvert)
909 		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
910 	mlog(0, "needs_downconvert = %d\n", needs_downconvert);
911 	return needs_downconvert;
912 }
913 
914 /*
915  * OCFS2_LOCK_PENDING and l_pending_gen.
916  *
917  * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
918  * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
919  * for more details on the race.
920  *
921  * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
922  * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
923  * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
924  * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
925  * the caller is going to try to clear PENDING again.  If nothing else is
926  * happening, __lockres_clear_pending() sees PENDING is unset and does
927  * nothing.
928  *
929  * But what if another path (eg downconvert thread) has just started a
930  * new locking action?  The other path has re-set PENDING.  Our path
931  * cannot clear PENDING, because that will re-open the original race
932  * window.
933  *
934  * [Example]
935  *
936  * ocfs2_meta_lock()
937  *  ocfs2_cluster_lock()
938  *   set BUSY
939  *   set PENDING
940  *   drop l_lock
941  *   ocfs2_dlm_lock()
942  *    ocfs2_locking_ast()		ocfs2_downconvert_thread()
943  *     clear PENDING			 ocfs2_unblock_lock()
944  *					  take_l_lock
945  *					  !BUSY
946  *					  ocfs2_prepare_downconvert()
947  *					   set BUSY
948  *					   set PENDING
949  *					  drop l_lock
950  *   take l_lock
951  *   clear PENDING
952  *   drop l_lock
953  *			<window>
954  *					  ocfs2_dlm_lock()
955  *
956  * So as you can see, we now have a window where l_lock is not held,
957  * PENDING is not set, and ocfs2_dlm_lock() has not been called.
958  *
959  * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
960  * set by ocfs2_prepare_downconvert().  That wasn't nice.
961  *
962  * To solve this we introduce l_pending_gen.  A call to
963  * lockres_clear_pending() will only do so when it is passed a generation
964  * number that matches the lockres.  lockres_set_pending() will return the
965  * current generation number.  When ocfs2_cluster_lock() goes to clear
966  * PENDING, it passes the generation it got from set_pending().  In our
967  * example above, the generation numbers will *not* match.  Thus,
968  * ocfs2_cluster_lock() will not clear the PENDING set by
969  * ocfs2_prepare_downconvert().
970  */
971 
972 /* Unlocked version for ocfs2_locking_ast() */
973 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
974 				    unsigned int generation,
975 				    struct ocfs2_super *osb)
976 {
977 	assert_spin_locked(&lockres->l_lock);
978 
979 	/*
980 	 * The ast and locking functions can race us here.  The winner
981 	 * will clear pending, the loser will not.
982 	 */
983 	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
984 	    (lockres->l_pending_gen != generation))
985 		return;
986 
987 	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
988 	lockres->l_pending_gen++;
989 
990 	/*
991 	 * The downconvert thread may have skipped us because we
992 	 * were PENDING.  Wake it up.
993 	 */
994 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
995 		ocfs2_wake_downconvert_thread(osb);
996 }
997 
998 /* Locked version for callers of ocfs2_dlm_lock() */
999 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1000 				  unsigned int generation,
1001 				  struct ocfs2_super *osb)
1002 {
1003 	unsigned long flags;
1004 
1005 	spin_lock_irqsave(&lockres->l_lock, flags);
1006 	__lockres_clear_pending(lockres, generation, osb);
1007 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1008 }
1009 
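/*
 * Mark the lockres PENDING and return the current generation so that the
 * caller can pass it back to lockres_clear_pending() after ocfs2_dlm_lock()
 * returns.  Called with the lockres spinlock held and OCFS2_LOCK_BUSY set.
 */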
1010 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1011 {
1012 	assert_spin_locked(&lockres->l_lock);
1013 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1014 
1015 	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1016 
1017 	return lockres->l_pending_gen;
1018 }
1019 
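/*
 * Blocking AST: another node has requested 'level' and is blocked by the
 * level we hold.  Record the blocking level and, if a downconvert is
 * needed, queue the lockres for the downconvert thread.
 */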
1020 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1021 {
1022 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1023 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1024 	int needs_downconvert;
1025 	unsigned long flags;
1026 
1027 	BUG_ON(level <= DLM_LOCK_NL);
1028 
1029 	mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1030 	     "type %s\n", lockres->l_name, level, lockres->l_level,
1031 	     ocfs2_lock_type_string(lockres->l_type));
1032 
1033 	/*
1034 	 * We can skip the bast for locks which don't enable caching -
1035 	 * they'll be dropped at the earliest possible time anyway.
1036 	 */
1037 	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1038 		return;
1039 
1040 	spin_lock_irqsave(&lockres->l_lock, flags);
1041 	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1042 	if (needs_downconvert)
1043 		ocfs2_schedule_blocked_lock(osb, lockres);
1044 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1045 
1046 	wake_up(&lockres->l_event);
1047 
1048 	ocfs2_wake_downconvert_thread(osb);
1049 }
1050 
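/*
 * Locking AST: the DLM has completed an attach, convert or downconvert on
 * this lockres.  Bring the local lock state in line with the granted mode
 * and wake anyone waiting on the change.
 */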
1051 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1052 {
1053 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1054 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1055 	unsigned long flags;
1056 	int status;
1057 
1058 	spin_lock_irqsave(&lockres->l_lock, flags);
1059 
1060 	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1061 
1062 	if (status == -EAGAIN) {
1063 		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1064 		goto out;
1065 	}
1066 
1067 	if (status) {
1068 		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1069 		     lockres->l_name, status);
1070 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1071 		return;
1072 	}
1073 
1074 	mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1075 	     "level %d => %d\n", lockres->l_name, lockres->l_action,
1076 	     lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1077 
1078 	switch(lockres->l_action) {
1079 	case OCFS2_AST_ATTACH:
1080 		ocfs2_generic_handle_attach_action(lockres);
1081 		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1082 		break;
1083 	case OCFS2_AST_CONVERT:
1084 		ocfs2_generic_handle_convert_action(lockres);
1085 		break;
1086 	case OCFS2_AST_DOWNCONVERT:
1087 		ocfs2_generic_handle_downconvert_action(lockres);
1088 		break;
1089 	default:
1090 		mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1091 		     "flags 0x%lx, unlock: %u\n",
1092 		     lockres->l_name, lockres->l_action, lockres->l_flags,
1093 		     lockres->l_unlock_action);
1094 		BUG();
1095 	}
1096 out:
1097 	/* set it to something invalid so if we get called again we
1098 	 * can catch it. */
1099 	lockres->l_action = OCFS2_AST_INVALID;
1100 
1101 	/* Did we try to cancel this lock?  Clear that state */
1102 	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1103 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1104 
1105 	/*
1106 	 * We may have beaten the locking functions here.  We certainly
1107 	 * know that dlm_lock() has been called :-)
1108 	 * Because we can't have two lock calls in flight at once, we
1109 	 * can use lockres->l_pending_gen.
1110 	 */
1111 	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1112 
1113 	wake_up(&lockres->l_event);
1114 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1115 }
1116 
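/*
 * Unlock AST: completion callback for a cancel-convert or a full unlock.
 * A successful cancel leaves the lock at its old level; a drop resets the
 * level to DLM_LOCK_IV.  Either way BUSY is cleared and waiters are woken.
 */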
1117 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1118 {
1119 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1120 	unsigned long flags;
1121 
1122 	mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1123 	     lockres->l_name, lockres->l_unlock_action);
1124 
1125 	spin_lock_irqsave(&lockres->l_lock, flags);
1126 	if (error) {
1127 		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1128 		     "unlock_action %d\n", error, lockres->l_name,
1129 		     lockres->l_unlock_action);
1130 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1131 		return;
1132 	}
1133 
1134 	switch(lockres->l_unlock_action) {
1135 	case OCFS2_UNLOCK_CANCEL_CONVERT:
1136 		mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1137 		lockres->l_action = OCFS2_AST_INVALID;
1138 		/* Downconvert thread may have requeued this lock, we
1139 		 * need to wake it. */
1140 		if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1141 			ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1142 		break;
1143 	case OCFS2_UNLOCK_DROP_LOCK:
1144 		lockres->l_level = DLM_LOCK_IV;
1145 		break;
1146 	default:
1147 		BUG();
1148 	}
1149 
1150 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1151 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1152 	wake_up(&lockres->l_event);
1153 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1154 }
1155 
1156 /*
1157  * This is the filesystem locking protocol.  It provides the lock handling
1158  * hooks for the underlying DLM.  It has a maximum version number.
1159  * The version number allows interoperability with systems running at
1160  * the same major number and an equal or smaller minor number.
1161  *
1162  * Whenever the filesystem does new things with locks (adds or removes a
1163  * lock, orders them differently, does different things underneath a lock),
1164  * the version must be changed.  The protocol is negotiated when joining
1165  * the dlm domain.  A node may join the domain if its major version is
1166  * identical to all other nodes and its minor version is greater than
1167  * or equal to all other nodes.  When its minor version is greater than
1168  * the other nodes, it will run at the minor version specified by the
1169  * other nodes.
1170  *
1171  * If a locking change is made that will not be compatible with older
1172  * versions, the major number must be increased and the minor version set
1173  * to zero.  If a change merely adds a behavior that can be disabled when
1174  * speaking to older versions, the minor version must be increased.  If a
1175  * change adds a fully backwards compatible change (eg, LVB changes that
1176  * are just ignored by older versions), the version does not need to be
1177  * updated.
1178  */
1179 static struct ocfs2_locking_protocol lproto = {
1180 	.lp_max_version = {
1181 		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1182 		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1183 	},
1184 	.lp_lock_ast		= ocfs2_locking_ast,
1185 	.lp_blocking_ast	= ocfs2_blocking_ast,
1186 	.lp_unlock_ast		= ocfs2_unlock_ast,
1187 };
1188 
1189 void ocfs2_set_locking_protocol(void)
1190 {
1191 	ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
1192 }
1193 
1194 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1195 						int convert)
1196 {
1197 	unsigned long flags;
1198 
1199 	spin_lock_irqsave(&lockres->l_lock, flags);
1200 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1201 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1202 	if (convert)
1203 		lockres->l_action = OCFS2_AST_INVALID;
1204 	else
1205 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1206 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1207 
1208 	wake_up(&lockres->l_event);
1209 }
1210 
1211 /* Note: If we detect another process working on the lock (i.e.,
1212  * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1213  * to do the right thing in that case.
1214  */
1215 static int ocfs2_lock_create(struct ocfs2_super *osb,
1216 			     struct ocfs2_lock_res *lockres,
1217 			     int level,
1218 			     u32 dlm_flags)
1219 {
1220 	int ret = 0;
1221 	unsigned long flags;
1222 	unsigned int gen;
1223 
1224 	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1225 	     dlm_flags);
1226 
1227 	spin_lock_irqsave(&lockres->l_lock, flags);
1228 	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1229 	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1230 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1231 		goto bail;
1232 	}
1233 
1234 	lockres->l_action = OCFS2_AST_ATTACH;
1235 	lockres->l_requested = level;
1236 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1237 	gen = lockres_set_pending(lockres);
1238 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1239 
1240 	ret = ocfs2_dlm_lock(osb->cconn,
1241 			     level,
1242 			     &lockres->l_lksb,
1243 			     dlm_flags,
1244 			     lockres->l_name,
1245 			     OCFS2_LOCK_ID_MAX_LEN - 1);
1246 	lockres_clear_pending(lockres, gen, osb);
1247 	if (ret) {
1248 		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1249 		ocfs2_recover_from_dlm_error(lockres, 1);
1250 	}
1251 
1252 	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1253 
1254 bail:
1255 	return ret;
1256 }
1257 
1258 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1259 					int flag)
1260 {
1261 	unsigned long flags;
1262 	int ret;
1263 
1264 	spin_lock_irqsave(&lockres->l_lock, flags);
1265 	ret = lockres->l_flags & flag;
1266 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1267 
1268 	return ret;
1269 }
1270 
1271 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1272 
1273 {
1274 	wait_event(lockres->l_event,
1275 		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1276 }
1277 
1278 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1279 
1280 {
1281 	wait_event(lockres->l_event,
1282 		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1283 }
1284 
1285 /* predict what lock level we'll be dropping down to on behalf
1286  * of another node, and return true if the currently wanted
1287  * level will be compatible with it. */
1288 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1289 						     int wanted)
1290 {
1291 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1292 
1293 	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1294 }
1295 
1296 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1297 {
1298 	INIT_LIST_HEAD(&mw->mw_item);
1299 	init_completion(&mw->mw_complete);
1300 	ocfs2_init_start_time(mw);
1301 }
1302 
1303 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1304 {
1305 	wait_for_completion(&mw->mw_complete);
1306 	/* Re-arm the completion in case we want to wait on it again */
1307 	INIT_COMPLETION(mw->mw_complete);
1308 	return mw->mw_status;
1309 }
1310 
1311 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1312 				    struct ocfs2_mask_waiter *mw,
1313 				    unsigned long mask,
1314 				    unsigned long goal)
1315 {
1316 	BUG_ON(!list_empty(&mw->mw_item));
1317 
1318 	assert_spin_locked(&lockres->l_lock);
1319 
1320 	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1321 	mw->mw_mask = mask;
1322 	mw->mw_goal = goal;
1323 }
1324 
1325 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1326  * if the mask still hadn't reached its goal */
1327 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1328 				      struct ocfs2_mask_waiter *mw)
1329 {
1330 	unsigned long flags;
1331 	int ret = 0;
1332 
1333 	spin_lock_irqsave(&lockres->l_lock, flags);
1334 	if (!list_empty(&mw->mw_item)) {
1335 		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1336 			ret = -EBUSY;
1337 
1338 		list_del_init(&mw->mw_item);
1339 		init_completion(&mw->mw_complete);
1340 	}
1341 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1342 
1343 	return ret;
1344 
1345 }
1346 
1347 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1348 					     struct ocfs2_lock_res *lockres)
1349 {
1350 	int ret;
1351 
1352 	ret = wait_for_completion_interruptible(&mw->mw_complete);
1353 	if (ret)
1354 		lockres_remove_mask_waiter(lockres, mw);
1355 	else
1356 		ret = mw->mw_status;
1357 	/* Re-arm the completion in case we want to wait on it again */
1358 	INIT_COMPLETION(mw->mw_complete);
1359 	return ret;
1360 }
1361 
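/*
 * Core cluster locking routine.  Acquires (or upconverts to) 'level' on
 * the lockres, calling ocfs2_dlm_lock() when the currently granted level
 * is insufficient.  Unless OCFS2_LOCK_NONBLOCK is set in arg_flags, the
 * caller waits on a mask waiter until the lock becomes usable.
 */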
1362 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1363 				struct ocfs2_lock_res *lockres,
1364 				int level,
1365 				u32 lkm_flags,
1366 				int arg_flags,
1367 				int l_subclass,
1368 				unsigned long caller_ip)
1369 {
1370 	struct ocfs2_mask_waiter mw;
1371 	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1372 	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1373 	unsigned long flags;
1374 	unsigned int gen;
1375 	int noqueue_attempted = 0;
1376 
1377 	ocfs2_init_mask_waiter(&mw);
1378 
1379 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1380 		lkm_flags |= DLM_LKF_VALBLK;
1381 
1382 again:
1383 	wait = 0;
1384 
1385 	spin_lock_irqsave(&lockres->l_lock, flags);
1386 
1387 	if (catch_signals && signal_pending(current)) {
1388 		ret = -ERESTARTSYS;
1389 		goto unlock;
1390 	}
1391 
1392 	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1393 			"Cluster lock called on freeing lockres %s! flags "
1394 			"0x%lx\n", lockres->l_name, lockres->l_flags);
1395 
1396 	/* We only compare against the currently granted level
1397 	 * here. If the lock is blocked waiting on a downconvert,
1398 	 * we'll get caught below. */
1399 	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1400 	    level > lockres->l_level) {
1401 		/* is someone sitting in dlm_lock? If so, wait on
1402 		 * them. */
1403 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1404 		wait = 1;
1405 		goto unlock;
1406 	}
1407 
1408 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1409 		/*
1410 		 * We've upconverted. If the lock now has a level we can
1411 		 * work with, we take it. If, however, the lock is not at the
1412 		 * required level, we go thru the full cycle. One way this could
1413 		 * happen is if a process requesting an upconvert to PR is
1414 		 * closely followed by another requesting upconvert to an EX.
1415 		 * If the process requesting EX lands here, we want it to
1416 		 * continue attempting to upconvert and let the process
1417 		 * requesting PR take the lock.
1418 		 * If multiple processes request upconvert to PR, the first one
1419 		 * here will take the lock. The others will have to go thru the
1420 		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1421 		 * downconvert request.
1422 		 */
1423 		if (level <= lockres->l_level)
1424 			goto update_holders;
1425 	}
1426 
1427 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1428 	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1429 		/* is the lock currently blocked on behalf of
1430 		 * another node? */
1431 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1432 		wait = 1;
1433 		goto unlock;
1434 	}
1435 
1436 	if (level > lockres->l_level) {
1437 		if (noqueue_attempted > 0) {
1438 			ret = -EAGAIN;
1439 			goto unlock;
1440 		}
1441 		if (lkm_flags & DLM_LKF_NOQUEUE)
1442 			noqueue_attempted = 1;
1443 
1444 		if (lockres->l_action != OCFS2_AST_INVALID)
1445 			mlog(ML_ERROR, "lockres %s has action %u pending\n",
1446 			     lockres->l_name, lockres->l_action);
1447 
1448 		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1449 			lockres->l_action = OCFS2_AST_ATTACH;
1450 			lkm_flags &= ~DLM_LKF_CONVERT;
1451 		} else {
1452 			lockres->l_action = OCFS2_AST_CONVERT;
1453 			lkm_flags |= DLM_LKF_CONVERT;
1454 		}
1455 
1456 		lockres->l_requested = level;
1457 		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1458 		gen = lockres_set_pending(lockres);
1459 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1460 
1461 		BUG_ON(level == DLM_LOCK_IV);
1462 		BUG_ON(level == DLM_LOCK_NL);
1463 
1464 		mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1465 		     lockres->l_name, lockres->l_level, level);
1466 
1467 		/* call dlm_lock to upgrade lock now */
1468 		ret = ocfs2_dlm_lock(osb->cconn,
1469 				     level,
1470 				     &lockres->l_lksb,
1471 				     lkm_flags,
1472 				     lockres->l_name,
1473 				     OCFS2_LOCK_ID_MAX_LEN - 1);
1474 		lockres_clear_pending(lockres, gen, osb);
1475 		if (ret) {
1476 			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1477 			    (ret != -EAGAIN)) {
1478 				ocfs2_log_dlm_error("ocfs2_dlm_lock",
1479 						    ret, lockres);
1480 			}
1481 			ocfs2_recover_from_dlm_error(lockres, 1);
1482 			goto out;
1483 		}
1484 
1485 		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1486 		     lockres->l_name);
1487 
1488 		/* At this point we've gone inside the dlm and need to
1489 		 * complete our work regardless. */
1490 		catch_signals = 0;
1491 
1492 		/* wait for busy to clear and carry on */
1493 		goto again;
1494 	}
1495 
1496 update_holders:
1497 	/* Ok, if we get here then we're good to go. */
1498 	ocfs2_inc_holders(lockres, level);
1499 
1500 	ret = 0;
1501 unlock:
1502 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1503 
1504 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1505 out:
1506 	/*
1507 	 * This is helping work around a lock inversion between the page lock
1508 	 * and dlm locks.  One path holds the page lock while calling aops
1509 	 * which block acquiring dlm locks.  The downconvert thread holds dlm
1510 	 * locks while acquiring page locks as it downconverts data locks.
1511 	 * This block is helping an aop path notice the inversion and back
1512 	 * off to unlock its page lock before trying the dlm lock again.
1513 	 */
1514 	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1515 	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1516 		wait = 0;
1517 		if (lockres_remove_mask_waiter(lockres, &mw))
1518 			ret = -EAGAIN;
1519 		else
1520 			goto again;
1521 	}
1522 	if (wait) {
1523 		ret = ocfs2_wait_for_mask(&mw);
1524 		if (ret == 0)
1525 			goto again;
1526 		mlog_errno(ret);
1527 	}
1528 	ocfs2_update_lock_stats(lockres, level, &mw, ret);
1529 
1530 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1531 	if (!ret && lockres->l_lockdep_map.key != NULL) {
1532 		if (level == DLM_LOCK_PR)
1533 			rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1534 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1535 				caller_ip);
1536 		else
1537 			rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1538 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1539 				caller_ip);
1540 	}
1541 #endif
1542 	return ret;
1543 }
1544 
1545 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1546 				     struct ocfs2_lock_res *lockres,
1547 				     int level,
1548 				     u32 lkm_flags,
1549 				     int arg_flags)
1550 {
1551 	return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1552 				    0, _RET_IP_);
1553 }
1554 
1555 
1556 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1557 				   struct ocfs2_lock_res *lockres,
1558 				   int level,
1559 				   unsigned long caller_ip)
1560 {
1561 	unsigned long flags;
1562 
1563 	spin_lock_irqsave(&lockres->l_lock, flags);
1564 	ocfs2_dec_holders(lockres, level);
1565 	ocfs2_downconvert_on_unlock(osb, lockres);
1566 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1567 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1568 	if (lockres->l_lockdep_map.key != NULL)
1569 		rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1570 #endif
1571 }
1572 
1573 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1574 				 struct ocfs2_lock_res *lockres,
1575 				 int ex,
1576 				 int local)
1577 {
1578 	int level =  ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1579 	unsigned long flags;
1580 	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1581 
1582 	spin_lock_irqsave(&lockres->l_lock, flags);
1583 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1584 	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1585 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1586 
1587 	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1588 }
1589 
1590 /* Grants us an EX lock on the data and metadata resources, skipping
1591  * the normal cluster directory lookup. Use this ONLY on newly created
1592  * inodes which other nodes can't possibly see, and which haven't been
1593  * hashed in the inode hash yet. This can give us a good performance
1594  * increase as it'll skip the network broadcast normally associated
1595  * with creating a new lock resource. */
1596 int ocfs2_create_new_inode_locks(struct inode *inode)
1597 {
1598 	int ret;
1599 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1600 
1601 	BUG_ON(!inode);
1602 	BUG_ON(!ocfs2_inode_is_new(inode));
1603 
1604 	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1605 
1606 	/* NOTE: That we don't increment any of the holder counts, nor
1607 	 * do we add anything to a journal handle. Since this is
1608 	 * supposed to be a new inode which the cluster doesn't know
1609 	 * about yet, there is no need to.  As far as the LVB handling
1610 	 * is concerned, this is basically like acquiring an EX lock
1611 	 * on a resource which has an invalid one -- we'll set it
1612 	 * valid when we release the EX. */
1613 
1614 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1615 	if (ret) {
1616 		mlog_errno(ret);
1617 		goto bail;
1618 	}
1619 
1620 	/*
1621 	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1622 	 * don't use a generation in their lock names.
1623 	 */
1624 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1625 	if (ret) {
1626 		mlog_errno(ret);
1627 		goto bail;
1628 	}
1629 
1630 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1631 	if (ret) {
1632 		mlog_errno(ret);
1633 		goto bail;
1634 	}
1635 
1636 bail:
1637 	return ret;
1638 }
1639 
1640 int ocfs2_rw_lock(struct inode *inode, int write)
1641 {
1642 	int status, level;
1643 	struct ocfs2_lock_res *lockres;
1644 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1645 
1646 	BUG_ON(!inode);
1647 
1648 	mlog(0, "inode %llu take %s RW lock\n",
1649 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1650 	     write ? "EXMODE" : "PRMODE");
1651 
1652 	if (ocfs2_mount_local(osb))
1653 		return 0;
1654 
1655 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
1656 
1657 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1658 
1659 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1660 				    0);
1661 	if (status < 0)
1662 		mlog_errno(status);
1663 
1664 	return status;
1665 }
1666 
1667 void ocfs2_rw_unlock(struct inode *inode, int write)
1668 {
1669 	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1670 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1671 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1672 
1673 	mlog(0, "inode %llu drop %s RW lock\n",
1674 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1675 	     write ? "EXMODE" : "PRMODE");
1676 
1677 	if (!ocfs2_mount_local(osb))
1678 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1679 }
1680 
1681 /*
1682  * ocfs2_open_lock always gets a PR mode lock.
1683  */
1684 int ocfs2_open_lock(struct inode *inode)
1685 {
1686 	int status = 0;
1687 	struct ocfs2_lock_res *lockres;
1688 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1689 
1690 	BUG_ON(!inode);
1691 
1692 	mlog(0, "inode %llu take PRMODE open lock\n",
1693 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1694 
1695 	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1696 		goto out;
1697 
1698 	lockres = &OCFS2_I(inode)->ip_open_lockres;
1699 
1700 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1701 				    DLM_LOCK_PR, 0, 0);
1702 	if (status < 0)
1703 		mlog_errno(status);
1704 
1705 out:
1706 	return status;
1707 }
1708 
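/* Try to take the open lock without queueing: because of DLM_LKF_NOQUEUE,
 * an -EAGAIN here means another node still holds a conflicting open lock,
 * i.e. the inode is still in use somewhere in the cluster. */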
1709 int ocfs2_try_open_lock(struct inode *inode, int write)
1710 {
1711 	int status = 0, level;
1712 	struct ocfs2_lock_res *lockres;
1713 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1714 
1715 	BUG_ON(!inode);
1716 
1717 	mlog(0, "inode %llu try to take %s open lock\n",
1718 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1719 	     write ? "EXMODE" : "PRMODE");
1720 
1721 	if (ocfs2_is_hard_readonly(osb)) {
1722 		if (write)
1723 			status = -EROFS;
1724 		goto out;
1725 	}
1726 
1727 	if (ocfs2_mount_local(osb))
1728 		goto out;
1729 
1730 	lockres = &OCFS2_I(inode)->ip_open_lockres;
1731 
1732 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1733 
1734 	/*
1735 	 * The file system may already be holding a PRMODE/EXMODE open lock.
1736 	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1737 	 * other nodes and the -EAGAIN will indicate to the caller that
1738 	 * this inode is still in use.
1739 	 */
1740 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1741 				    level, DLM_LKF_NOQUEUE, 0);
1742 
1743 out:
1744 	return status;
1745 }
1746 
1747 /*
1748  * ocfs2_open_unlock unlocks PR and EX mode open locks.
1749  */
1750 void ocfs2_open_unlock(struct inode *inode)
1751 {
1752 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1753 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1754 
1755 	mlog(0, "inode %llu drop open lock\n",
1756 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1757 
1758 	if (ocfs2_mount_local(osb))
1759 		goto out;
1760 
1761 	if (lockres->l_ro_holders)
1762 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1763 				     DLM_LOCK_PR);
1764 	if (lockres->l_ex_holders)
1765 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1766 				     DLM_LOCK_EX);
1767 
1768 out:
1769 	return;
1770 }
1771 
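/* A signal interrupted a pending flock() request. Cancel any outstanding
 * convert, then report -ERESTARTSYS unless the lock was actually granted
 * before the cancel took effect, in which case we return 0 so the caller
 * keeps the lock instead of restarting the syscall. */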
1772 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1773 				     int level)
1774 {
1775 	int ret;
1776 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1777 	unsigned long flags;
1778 	struct ocfs2_mask_waiter mw;
1779 
1780 	ocfs2_init_mask_waiter(&mw);
1781 
1782 retry_cancel:
1783 	spin_lock_irqsave(&lockres->l_lock, flags);
1784 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1785 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
1786 		if (ret) {
1787 			spin_unlock_irqrestore(&lockres->l_lock, flags);
1788 			ret = ocfs2_cancel_convert(osb, lockres);
1789 			if (ret < 0) {
1790 				mlog_errno(ret);
1791 				goto out;
1792 			}
1793 			goto retry_cancel;
1794 		}
1795 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1796 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1797 
1798 		ocfs2_wait_for_mask(&mw);
1799 		goto retry_cancel;
1800 	}
1801 
1802 	ret = -ERESTARTSYS;
1803 	/*
1804 	 * We may still have gotten the lock, in which case there's no
1805 	 * point to restarting the syscall.
1806 	 */
1807 	if (lockres->l_level == level)
1808 		ret = 0;
1809 
1810 	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1811 	     lockres->l_flags, lockres->l_level, lockres->l_action);
1812 
1813 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1814 
1815 out:
1816 	return ret;
1817 }
1818 
1819 /*
1820  * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1821  * flock() calls. The locking approach this requires is sufficiently
1822  * different from all other cluster lock types that we implement a
1823  * separate path to the "low-level" dlm calls. In particular:
1824  *
1825  * - No optimization of lock levels is done - we take exactly the
1826  *   level that's been requested.
1827  *
1828  * - No lock caching is employed. We immediately downconvert to
1829  *   no-lock at unlock time. This also means flock locks never go on
1830  *   the blocking list.
1831  *
1832  * - Since userspace can trivially deadlock itself with flock, we make
1833  *   sure to allow cancellation of a misbehaving application's flock()
1834  *   request.
1835  *
1836  * - Access to any flock lockres is never concurrent, so we can
1837  *   simplify the code by requiring the caller to guarantee
1838  *   serialization of dlmglue flock calls.
1839  */
1840 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1841 {
1842 	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1843 	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1844 	unsigned long flags;
1845 	struct ocfs2_file_private *fp = file->private_data;
1846 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
1847 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1848 	struct ocfs2_mask_waiter mw;
1849 
1850 	ocfs2_init_mask_waiter(&mw);
1851 
1852 	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1853 	    (lockres->l_level > DLM_LOCK_NL)) {
1854 		mlog(ML_ERROR,
1855 		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1856 		     "level: %u\n", lockres->l_name, lockres->l_flags,
1857 		     lockres->l_level);
1858 		return -EINVAL;
1859 	}
1860 
1861 	spin_lock_irqsave(&lockres->l_lock, flags);
1862 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1863 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1864 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1865 
1866 		/*
1867 		 * Get the lock at NLMODE to start - that way we
1868 		 * can cancel the upconvert request if need be.
1869 		 */
1870 		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1871 		if (ret < 0) {
1872 			mlog_errno(ret);
1873 			goto out;
1874 		}
1875 
1876 		ret = ocfs2_wait_for_mask(&mw);
1877 		if (ret) {
1878 			mlog_errno(ret);
1879 			goto out;
1880 		}
1881 		spin_lock_irqsave(&lockres->l_lock, flags);
1882 	}
1883 
1884 	lockres->l_action = OCFS2_AST_CONVERT;
1885 	lkm_flags |= DLM_LKF_CONVERT;
1886 	lockres->l_requested = level;
1887 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1888 
1889 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1890 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1891 
1892 	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1893 			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1894 	if (ret) {
1895 		if (!trylock || (ret != -EAGAIN)) {
1896 			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1897 			ret = -EINVAL;
1898 		}
1899 
1900 		ocfs2_recover_from_dlm_error(lockres, 1);
1901 		lockres_remove_mask_waiter(lockres, &mw);
1902 		goto out;
1903 	}
1904 
1905 	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1906 	if (ret == -ERESTARTSYS) {
1907 		/*
1908 		 * Userspace can trivially deadlock itself with
1909 		 * flock(). The local (non-cluster) behavior is to allow
1910 		 * the deadlock, but abort the system call if a signal is
1911 		 * received. We follow this example, otherwise a
1912 		 * poorly written program could sit in the kernel until
1913 		 * reboot.
1914 		 *
1915 		 * Handling this is a bit more complicated for Ocfs2
1916 		 * though. We can't exit this function with an
1917 		 * outstanding lock request, so a cancel convert is
1918 		 * required. We intentionally overwrite 'ret' - if the
1919 		 * cancel fails and the lock was granted, it's easier
1920 		 * to just bubble success back up to the user.
1921 		 */
1922 		ret = ocfs2_flock_handle_signal(lockres, level);
1923 	} else if (!ret && (level > lockres->l_level)) {
1924 		/* Trylock failed asynchronously */
1925 		BUG_ON(!trylock);
1926 		ret = -EAGAIN;
1927 	}
1928 
1929 out:
1930 
1931 	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1932 	     lockres->l_name, ex, trylock, ret);
1933 	return ret;
1934 }
1935 
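/* Drop a flock()-style lock by downconverting straight back to NL. We
 * fake a blocking ast so that the standard downconvert machinery can be
 * reused, then wait for the conversion to complete. */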
1936 void ocfs2_file_unlock(struct file *file)
1937 {
1938 	int ret;
1939 	unsigned int gen;
1940 	unsigned long flags;
1941 	struct ocfs2_file_private *fp = file->private_data;
1942 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
1943 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1944 	struct ocfs2_mask_waiter mw;
1945 
1946 	ocfs2_init_mask_waiter(&mw);
1947 
1948 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1949 		return;
1950 
1951 	if (lockres->l_level == DLM_LOCK_NL)
1952 		return;
1953 
1954 	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1955 	     lockres->l_name, lockres->l_flags, lockres->l_level,
1956 	     lockres->l_action);
1957 
1958 	spin_lock_irqsave(&lockres->l_lock, flags);
1959 	/*
1960 	 * Fake a blocking ast for the downconvert code.
1961 	 */
1962 	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1963 	lockres->l_blocking = DLM_LOCK_EX;
1964 
1965 	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1966 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1967 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1968 
1969 	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
1970 	if (ret) {
1971 		mlog_errno(ret);
1972 		return;
1973 	}
1974 
1975 	ret = ocfs2_wait_for_mask(&mw);
1976 	if (ret)
1977 		mlog_errno(ret);
1978 }
1979 
1980 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1981 					struct ocfs2_lock_res *lockres)
1982 {
1983 	int kick = 0;
1984 
1985 	/* If we know that another node is waiting on our lock, kick
1986 	 * the downconvert thread pre-emptively when we reach a release
1987 	 * condition. */
1988 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1989 		switch(lockres->l_blocking) {
1990 		case DLM_LOCK_EX:
1991 			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1992 				kick = 1;
1993 			break;
1994 		case DLM_LOCK_PR:
1995 			if (!lockres->l_ex_holders)
1996 				kick = 1;
1997 			break;
1998 		default:
1999 			BUG();
2000 		}
2001 	}
2002 
2003 	if (kick)
2004 		ocfs2_wake_downconvert_thread(osb);
2005 }
2006 
2007 #define OCFS2_SEC_BITS   34
2008 #define OCFS2_SEC_SHIFT  (64 - OCFS2_SEC_BITS)
2009 #define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)
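/*
 * With the values above, the packed 64-bit layout is: bits 63..30 hold
 * tv_sec (truncated to its low 34 bits) and bits 29..0 hold tv_nsec.
 * For example, tv_sec = 1, tv_nsec = 5 packs to (1ULL << 30) | 5.
 */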
2010 
2011 /* LVB only has room for 64 bits of time here so we pack it for
2012  * now. */
2013 static u64 ocfs2_pack_timespec(struct timespec *spec)
2014 {
2015 	u64 res;
2016 	u64 sec = spec->tv_sec;
2017 	u32 nsec = spec->tv_nsec;
2018 
2019 	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
2020 
2021 	return res;
2022 }
2023 
2024 /* Call this with the lockres locked. I am reasonably sure we don't
2025  * need ip_lock in this function as anyone who would be changing those
2026  * values is supposed to be blocked in ocfs2_inode_lock right now. */
2027 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2028 {
2029 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2030 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2031 	struct ocfs2_meta_lvb *lvb;
2032 
2033 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2034 
2035 	/*
2036 	 * Invalidate the LVB of a deleted inode - this way other
2037 	 * nodes are forced to go to disk and discover the new inode
2038 	 * status.
2039 	 */
2040 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
2041 		lvb->lvb_version = 0;
2042 		goto out;
2043 	}
2044 
2045 	lvb->lvb_version   = OCFS2_LVB_VERSION;
2046 	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
2047 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2048 	lvb->lvb_iuid      = cpu_to_be32(i_uid_read(inode));
2049 	lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
2050 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
2051 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
2052 	lvb->lvb_iatime_packed  =
2053 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2054 	lvb->lvb_ictime_packed =
2055 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2056 	lvb->lvb_imtime_packed =
2057 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2058 	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
2059 	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2060 	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2061 
2062 out:
2063 	mlog_meta_lvb(0, lockres);
2064 }
2065 
2066 static void ocfs2_unpack_timespec(struct timespec *spec,
2067 				  u64 packed_time)
2068 {
2069 	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2070 	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2071 }
2072 
2073 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2074 {
2075 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2076 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2077 	struct ocfs2_meta_lvb *lvb;
2078 
2079 	mlog_meta_lvb(0, lockres);
2080 
2081 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2082 
2083 	/* We're safe here without the lockres lock... */
2084 	spin_lock(&oi->ip_lock);
2085 	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2086 	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2087 
2088 	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2089 	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2090 	ocfs2_set_inode_flags(inode);
2091 
2092 	/* fast-symlinks are a special case */
2093 	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2094 		inode->i_blocks = 0;
2095 	else
2096 		inode->i_blocks = ocfs2_inode_sector_count(inode);
2097 
2098 	i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2099 	i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2100 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
2101 	set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2102 	ocfs2_unpack_timespec(&inode->i_atime,
2103 			      be64_to_cpu(lvb->lvb_iatime_packed));
2104 	ocfs2_unpack_timespec(&inode->i_mtime,
2105 			      be64_to_cpu(lvb->lvb_imtime_packed));
2106 	ocfs2_unpack_timespec(&inode->i_ctime,
2107 			      be64_to_cpu(lvb->lvb_ictime_packed));
2108 	spin_unlock(&oi->ip_lock);
2109 }
2110 
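/* The LVB contents can only be trusted if the DLM reports them valid,
 * they carry the LVB version we understand, and they were stuffed for
 * the same inode generation we have in memory. */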
2111 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2112 					      struct ocfs2_lock_res *lockres)
2113 {
2114 	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2115 
2116 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2117 	    && lvb->lvb_version == OCFS2_LVB_VERSION
2118 	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2119 		return 1;
2120 	return 0;
2121 }
2122 
2123 /* Determine whether a lock resource needs to be refreshed, and
2124  * arbitrate who gets to refresh it.
2125  *
2126  *   0 means no refresh needed.
2127  *
2128  *   > 0 means you need to refresh this and you MUST call
2129  *   ocfs2_complete_lock_res_refresh afterwards. */
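/*
 * A typical caller follows the pattern used by ocfs2_inode_lock_update()
 * and ocfs2_super_lock() below (sketch only):
 *
 *	status = ocfs2_should_refresh_lock_res(lockres);
 *	if (status) {
 *		status = <re-read the data this lock protects>;
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 */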
2130 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2131 {
2132 	unsigned long flags;
2133 	int status = 0;
2134 
2135 refresh_check:
2136 	spin_lock_irqsave(&lockres->l_lock, flags);
2137 	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2138 		spin_unlock_irqrestore(&lockres->l_lock, flags);
2139 		goto bail;
2140 	}
2141 
2142 	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2143 		spin_unlock_irqrestore(&lockres->l_lock, flags);
2144 
2145 		ocfs2_wait_on_refreshing_lock(lockres);
2146 		goto refresh_check;
2147 	}
2148 
2149 	/* Ok, I'll be the one to refresh this lock. */
2150 	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2151 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2152 
2153 	status = 1;
2154 bail:
2155 	mlog(0, "status %d\n", status);
2156 	return status;
2157 }
2158 
2159 /* If status is nonzero, I'll mark it as not being in refresh
2160  * anymore, but I won't clear the needs refresh flag. */
2161 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2162 						   int status)
2163 {
2164 	unsigned long flags;
2165 
2166 	spin_lock_irqsave(&lockres->l_lock, flags);
2167 	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2168 	if (!status)
2169 		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2170 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2171 
2172 	wake_up(&lockres->l_event);
2173 }
2174 
2175 /* May or may not return a bh: one is passed back only if the refresh had to go to disk. */
2176 static int ocfs2_inode_lock_update(struct inode *inode,
2177 				  struct buffer_head **bh)
2178 {
2179 	int status = 0;
2180 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2181 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2182 	struct ocfs2_dinode *fe;
2183 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2184 
2185 	if (ocfs2_mount_local(osb))
2186 		goto bail;
2187 
2188 	spin_lock(&oi->ip_lock);
2189 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
2190 		mlog(0, "Orphaned inode %llu was deleted while we "
2191 		     "were waiting on a lock. ip_flags = 0x%x\n",
2192 		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
2193 		spin_unlock(&oi->ip_lock);
2194 		status = -ENOENT;
2195 		goto bail;
2196 	}
2197 	spin_unlock(&oi->ip_lock);
2198 
2199 	if (!ocfs2_should_refresh_lock_res(lockres))
2200 		goto bail;
2201 
2202 	/* This will discard any caching information we might have had
2203 	 * for the inode metadata. */
2204 	ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2205 
2206 	ocfs2_extent_map_trunc(inode, 0);
2207 
2208 	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2209 		mlog(0, "Trusting LVB on inode %llu\n",
2210 		     (unsigned long long)oi->ip_blkno);
2211 		ocfs2_refresh_inode_from_lvb(inode);
2212 	} else {
2213 		/* Boo, we have to go to disk. */
2214 		/* read bh, cast, ocfs2_refresh_inode */
2215 		status = ocfs2_read_inode_block(inode, bh);
2216 		if (status < 0) {
2217 			mlog_errno(status);
2218 			goto bail_refresh;
2219 		}
2220 		fe = (struct ocfs2_dinode *) (*bh)->b_data;
2221 
2222 		/* This is a good chance to make sure we're not
2223 		 * locking an invalid object.  ocfs2_read_inode_block()
2224 		 * already checked that the inode block is sane.
2225 		 *
2226 		 * We bug on a stale inode here because we checked
2227 		 * above whether it was wiped from disk. The wiping
2228 		 * node provides a guarantee that we receive that
2229 		 * message and can mark the inode before dropping any
2230 		 * locks associated with it. */
2231 		mlog_bug_on_msg(inode->i_generation !=
2232 				le32_to_cpu(fe->i_generation),
2233 				"Invalid dinode %llu disk generation: %u "
2234 				"inode->i_generation: %u\n",
2235 				(unsigned long long)oi->ip_blkno,
2236 				le32_to_cpu(fe->i_generation),
2237 				inode->i_generation);
2238 		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2239 				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2240 				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
2241 				(unsigned long long)oi->ip_blkno,
2242 				(unsigned long long)le64_to_cpu(fe->i_dtime),
2243 				le32_to_cpu(fe->i_flags));
2244 
2245 		ocfs2_refresh_inode(inode, fe);
2246 		ocfs2_track_lock_refresh(lockres);
2247 	}
2248 
2249 	status = 0;
2250 bail_refresh:
2251 	ocfs2_complete_lock_res_refresh(lockres, status);
2252 bail:
2253 	return status;
2254 }
2255 
2256 static int ocfs2_assign_bh(struct inode *inode,
2257 			   struct buffer_head **ret_bh,
2258 			   struct buffer_head *passed_bh)
2259 {
2260 	int status;
2261 
2262 	if (passed_bh) {
2263 		/* Ok, the update went to disk for us, use the
2264 		 * returned bh. */
2265 		*ret_bh = passed_bh;
2266 		get_bh(*ret_bh);
2267 
2268 		return 0;
2269 	}
2270 
2271 	status = ocfs2_read_inode_block(inode, ret_bh);
2272 	if (status < 0)
2273 		mlog_errno(status);
2274 
2275 	return status;
2276 }
2277 
2278 /*
2279  * Returns < 0 on error, 0 on success. On success the caller holds
2280  * the inode metadata cluster lock at the requested level.
2281  */
2282 int ocfs2_inode_lock_full_nested(struct inode *inode,
2283 				 struct buffer_head **ret_bh,
2284 				 int ex,
2285 				 int arg_flags,
2286 				 int subclass)
2287 {
2288 	int status, level, acquired;
2289 	u32 dlm_flags;
2290 	struct ocfs2_lock_res *lockres = NULL;
2291 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2292 	struct buffer_head *local_bh = NULL;
2293 
2294 	BUG_ON(!inode);
2295 
2296 	mlog(0, "inode %llu, take %s META lock\n",
2297 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2298 	     ex ? "EXMODE" : "PRMODE");
2299 
2300 	status = 0;
2301 	acquired = 0;
2302 	/* We'll allow faking a readonly metadata lock for
2303 	 * read-only devices. */
2304 	if (ocfs2_is_hard_readonly(osb)) {
2305 		if (ex)
2306 			status = -EROFS;
2307 		goto getbh;
2308 	}
2309 
2310 	if (ocfs2_mount_local(osb))
2311 		goto local;
2312 
2313 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2314 		ocfs2_wait_for_recovery(osb);
2315 
2316 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
2317 	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2318 	dlm_flags = 0;
2319 	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2320 		dlm_flags |= DLM_LKF_NOQUEUE;
2321 
2322 	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2323 				      arg_flags, subclass, _RET_IP_);
2324 	if (status < 0) {
2325 		if (status != -EAGAIN && status != -EIOCBRETRY)
2326 			mlog_errno(status);
2327 		goto bail;
2328 	}
2329 
2330 	/* Notify the error cleanup path to drop the cluster lock. */
2331 	acquired = 1;
2332 
2333 	/* We wait twice because a node may have died while we were in
2334 	 * the lower dlm layers. The second time though, we've
2335 	 * committed to owning this lock so we don't allow signals to
2336 	 * abort the operation. */
2337 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2338 		ocfs2_wait_for_recovery(osb);
2339 
2340 local:
2341 	/*
2342 	 * We only see this flag if we're being called from
2343 	 * ocfs2_read_locked_inode(). It means we're locking an inode
2344 	 * which hasn't been populated yet, so clear the refresh flag
2345 	 * and let the caller handle it.
2346 	 */
2347 	if (inode->i_state & I_NEW) {
2348 		status = 0;
2349 		if (lockres)
2350 			ocfs2_complete_lock_res_refresh(lockres, 0);
2351 		goto bail;
2352 	}
2353 
2354 	/* This is fun. The caller may want a bh back, or it may
2355 	 * not. ocfs2_inode_lock_update definitely wants one in, but
2356 	 * may or may not read one, depending on what's in the
2357 	 * LVB. The result of all of this is that we've *only* gone to
2358 	 * disk if we have to, so the complexity is worthwhile. */
2359 	status = ocfs2_inode_lock_update(inode, &local_bh);
2360 	if (status < 0) {
2361 		if (status != -ENOENT)
2362 			mlog_errno(status);
2363 		goto bail;
2364 	}
2365 getbh:
2366 	if (ret_bh) {
2367 		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2368 		if (status < 0) {
2369 			mlog_errno(status);
2370 			goto bail;
2371 		}
2372 	}
2373 
2374 bail:
2375 	if (status < 0) {
2376 		if (ret_bh && (*ret_bh)) {
2377 			brelse(*ret_bh);
2378 			*ret_bh = NULL;
2379 		}
2380 		if (acquired)
2381 			ocfs2_inode_unlock(inode, ex);
2382 	}
2383 
2384 	if (local_bh)
2385 		brelse(local_bh);
2386 
2387 	return status;
2388 }
2389 
2390 /*
2391  * This is working around a lock inversion between tasks acquiring DLM
2392  * locks while holding a page lock and the downconvert thread which
2393  * blocks dlm lock acquiry while acquiring page locks.
2394  *
2395  * ** These _with_page variants are only intended to be called from aop
2396  * methods that hold page locks and return a very specific *positive* error
2397  * code that aop methods pass up to the VFS -- test for errors with != 0. **
2398  *
2399  * The DLM is called such that it returns -EAGAIN if it would have
2400  * blocked waiting for the downconvert thread.  In that case we unlock
2401  * our page so the downconvert thread can make progress.  Once we've
2402  * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2403  * that called us can bubble that back up into the VFS who will then
2404  * immediately retry the aop call.
2405  *
2406  * We do a blocking lock and immediate unlock before returning, though, so that
2407  * the lock has a great chance of being cached on this node by the time the VFS
2408  * calls back to retry the aop. This has the potential to livelock as nodes
2409  * ping locks back and forth, but that's a risk we're willing to take in
2410  * order to keep the fix for the lock inversion simple.
2411  */
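/*
 * A hypothetical aop-side caller might look like this (sketch only, not
 * copied from a real caller):
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0)
 *		return ret;	(may be AOP_TRUNCATED_PAGE, not negative)
 *	...
 *	ocfs2_inode_unlock(inode, 0);
 */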
2412 int ocfs2_inode_lock_with_page(struct inode *inode,
2413 			      struct buffer_head **ret_bh,
2414 			      int ex,
2415 			      struct page *page)
2416 {
2417 	int ret;
2418 
2419 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2420 	if (ret == -EAGAIN) {
2421 		unlock_page(page);
2422 		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2423 			ocfs2_inode_unlock(inode, ex);
2424 		ret = AOP_TRUNCATED_PAGE;
2425 	}
2426 
2427 	return ret;
2428 }
2429 
2430 int ocfs2_inode_lock_atime(struct inode *inode,
2431 			  struct vfsmount *vfsmnt,
2432 			  int *level)
2433 {
2434 	int ret;
2435 
2436 	ret = ocfs2_inode_lock(inode, NULL, 0);
2437 	if (ret < 0) {
2438 		mlog_errno(ret);
2439 		return ret;
2440 	}
2441 
2442 	/*
2443 	 * If we should update atime, we will get EX lock,
2444 	 * otherwise we just get PR lock.
2445 	 */
2446 	if (ocfs2_should_update_atime(inode, vfsmnt)) {
2447 		struct buffer_head *bh = NULL;
2448 
2449 		ocfs2_inode_unlock(inode, 0);
2450 		ret = ocfs2_inode_lock(inode, &bh, 1);
2451 		if (ret < 0) {
2452 			mlog_errno(ret);
2453 			return ret;
2454 		}
2455 		*level = 1;
2456 		if (ocfs2_should_update_atime(inode, vfsmnt))
2457 			ocfs2_update_inode_atime(inode, bh);
2458 		if (bh)
2459 			brelse(bh);
2460 	} else
2461 		*level = 0;
2462 
2463 	return ret;
2464 }
2465 
2466 void ocfs2_inode_unlock(struct inode *inode,
2467 		       int ex)
2468 {
2469 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2470 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2471 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2472 
2473 	mlog(0, "inode %llu drop %s META lock\n",
2474 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2475 	     ex ? "EXMODE" : "PRMODE");
2476 
2477 	if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2478 	    !ocfs2_mount_local(osb))
2479 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2480 }
2481 
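/* Take the orphan scan lock EX and hand back the current scan sequence
 * number. If the LVB is valid we read the cluster-wide sequence from it;
 * otherwise we fall back to our locally tracked sequence plus one. */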
2482 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2483 {
2484 	struct ocfs2_lock_res *lockres;
2485 	struct ocfs2_orphan_scan_lvb *lvb;
2486 	int status = 0;
2487 
2488 	if (ocfs2_is_hard_readonly(osb))
2489 		return -EROFS;
2490 
2491 	if (ocfs2_mount_local(osb))
2492 		return 0;
2493 
2494 	lockres = &osb->osb_orphan_scan.os_lockres;
2495 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2496 	if (status < 0)
2497 		return status;
2498 
2499 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2500 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2501 	    lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2502 		*seqno = be32_to_cpu(lvb->lvb_os_seqno);
2503 	else
2504 		*seqno = osb->osb_orphan_scan.os_seqno + 1;
2505 
2506 	return status;
2507 }
2508 
2509 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2510 {
2511 	struct ocfs2_lock_res *lockres;
2512 	struct ocfs2_orphan_scan_lvb *lvb;
2513 
2514 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2515 		lockres = &osb->osb_orphan_scan.os_lockres;
2516 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2517 		lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2518 		lvb->lvb_os_seqno = cpu_to_be32(seqno);
2519 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2520 	}
2521 }
2522 
2523 int ocfs2_super_lock(struct ocfs2_super *osb,
2524 		     int ex)
2525 {
2526 	int status = 0;
2527 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2528 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2529 
2530 	if (ocfs2_is_hard_readonly(osb))
2531 		return -EROFS;
2532 
2533 	if (ocfs2_mount_local(osb))
2534 		goto bail;
2535 
2536 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2537 	if (status < 0) {
2538 		mlog_errno(status);
2539 		goto bail;
2540 	}
2541 
2542 	/* The super block lock path is really in the best position to
2543 	 * know when resources covered by the lock need to be
2544 	 * refreshed, so we do it here. Of course, making sense of
2545 	 * everything is up to the caller :) */
2546 	status = ocfs2_should_refresh_lock_res(lockres);
2547 	if (status < 0) {
2548 		ocfs2_cluster_unlock(osb, lockres, level);
2549 		mlog_errno(status);
2550 		goto bail;
2551 	}
2552 	if (status) {
2553 		status = ocfs2_refresh_slot_info(osb);
2554 
2555 		ocfs2_complete_lock_res_refresh(lockres, status);
2556 
2557 		if (status < 0) {
2558 			ocfs2_cluster_unlock(osb, lockres, level);
2559 			mlog_errno(status);
2560 		}
2561 		ocfs2_track_lock_refresh(lockres);
2562 	}
2563 bail:
2564 	return status;
2565 }
2566 
2567 void ocfs2_super_unlock(struct ocfs2_super *osb,
2568 			int ex)
2569 {
2570 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2571 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2572 
2573 	if (!ocfs2_mount_local(osb))
2574 		ocfs2_cluster_unlock(osb, lockres, level);
2575 }
2576 
2577 int ocfs2_rename_lock(struct ocfs2_super *osb)
2578 {
2579 	int status;
2580 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2581 
2582 	if (ocfs2_is_hard_readonly(osb))
2583 		return -EROFS;
2584 
2585 	if (ocfs2_mount_local(osb))
2586 		return 0;
2587 
2588 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2589 	if (status < 0)
2590 		mlog_errno(status);
2591 
2592 	return status;
2593 }
2594 
2595 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2596 {
2597 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2598 
2599 	if (!ocfs2_mount_local(osb))
2600 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2601 }
2602 
2603 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2604 {
2605 	int status;
2606 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2607 
2608 	if (ocfs2_is_hard_readonly(osb))
2609 		return -EROFS;
2610 
2611 	if (ocfs2_mount_local(osb))
2612 		return 0;
2613 
2614 	status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2615 				    0, 0);
2616 	if (status < 0)
2617 		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2618 
2619 	return status;
2620 }
2621 
2622 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2623 {
2624 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2625 
2626 	if (!ocfs2_mount_local(osb))
2627 		ocfs2_cluster_unlock(osb, lockres,
2628 				     ex ? LKM_EXMODE : LKM_PRMODE);
2629 }
2630 
2631 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2632 {
2633 	int ret;
2634 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2635 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2636 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2637 
2638 	BUG_ON(!dl);
2639 
2640 	if (ocfs2_is_hard_readonly(osb)) {
2641 		if (ex)
2642 			return -EROFS;
2643 		return 0;
2644 	}
2645 
2646 	if (ocfs2_mount_local(osb))
2647 		return 0;
2648 
2649 	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2650 	if (ret < 0)
2651 		mlog_errno(ret);
2652 
2653 	return ret;
2654 }
2655 
2656 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2657 {
2658 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2659 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2660 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2661 
2662 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2663 		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2664 }
2665 
2666 /* Reference counting of the dlm debug structure. We want this because
2667  * open references on the debug inodes can outlive the mount, so
2668  * we can't rely on the ocfs2_super to always exist. */
2669 static void ocfs2_dlm_debug_free(struct kref *kref)
2670 {
2671 	struct ocfs2_dlm_debug *dlm_debug;
2672 
2673 	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2674 
2675 	kfree(dlm_debug);
2676 }
2677 
2678 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2679 {
2680 	if (dlm_debug)
2681 		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2682 }
2683 
2684 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2685 {
2686 	kref_get(&debug->d_refcnt);
2687 }
2688 
2689 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2690 {
2691 	struct ocfs2_dlm_debug *dlm_debug;
2692 
2693 	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2694 	if (!dlm_debug) {
2695 		mlog_errno(-ENOMEM);
2696 		goto out;
2697 	}
2698 
2699 	kref_init(&dlm_debug->d_refcnt);
2700 	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2701 	dlm_debug->d_locking_state = NULL;
2702 out:
2703 	return dlm_debug;
2704 }
2705 
2706 /* Access to this is arbitrated for us via seq_file->lock. */
2707 struct ocfs2_dlm_seq_priv {
2708 	struct ocfs2_dlm_debug *p_dlm_debug;
2709 	struct ocfs2_lock_res p_iter_res;
2710 	struct ocfs2_lock_res p_tmp_res;
2711 };
2712 
2713 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2714 						 struct ocfs2_dlm_seq_priv *priv)
2715 {
2716 	struct ocfs2_lock_res *iter, *ret = NULL;
2717 	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2718 
2719 	assert_spin_locked(&ocfs2_dlm_tracking_lock);
2720 
2721 	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2722 		/* discover the head of the list */
2723 		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2724 			mlog(0, "End of list found, %p\n", ret);
2725 			break;
2726 		}
2727 
2728 		/* We track our "dummy" iteration lockres' by a NULL
2729 		 * l_ops field. */
2730 		if (iter->l_ops != NULL) {
2731 			ret = iter;
2732 			break;
2733 		}
2734 	}
2735 
2736 	return ret;
2737 }
2738 
2739 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2740 {
2741 	struct ocfs2_dlm_seq_priv *priv = m->private;
2742 	struct ocfs2_lock_res *iter;
2743 
2744 	spin_lock(&ocfs2_dlm_tracking_lock);
2745 	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2746 	if (iter) {
2747 		/* Since lockres' have the lifetime of their container
2748 		 * (which can be inodes, ocfs2_supers, etc) we want to
2749 		 * copy this out to a temporary lockres while still
2750 		 * under the spinlock. Obviously after this we can't
2751 		 * trust any pointers on the copy returned, but that's
2752 		 * ok as the information we want isn't typically held
2753 		 * in them. */
2754 		priv->p_tmp_res = *iter;
2755 		iter = &priv->p_tmp_res;
2756 	}
2757 	spin_unlock(&ocfs2_dlm_tracking_lock);
2758 
2759 	return iter;
2760 }
2761 
2762 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2763 {
2764 }
2765 
2766 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2767 {
2768 	struct ocfs2_dlm_seq_priv *priv = m->private;
2769 	struct ocfs2_lock_res *iter = v;
2770 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2771 
2772 	spin_lock(&ocfs2_dlm_tracking_lock);
2773 	iter = ocfs2_dlm_next_res(iter, priv);
2774 	list_del_init(&dummy->l_debug_list);
2775 	if (iter) {
2776 		list_add(&dummy->l_debug_list, &iter->l_debug_list);
2777 		priv->p_tmp_res = *iter;
2778 		iter = &priv->p_tmp_res;
2779 	}
2780 	spin_unlock(&ocfs2_dlm_tracking_lock);
2781 
2782 	return iter;
2783 }
2784 
2785 /*
2786  * Version is used by debugfs.ocfs2 to determine the format being used
2787  *
2788  * New in version 2
2789  *	- Lock stats printed
2790  * New in version 3
2791  *	- Max time in lock stats is in usecs (instead of nsecs)
2792  */
2793 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2794 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2795 {
2796 	int i;
2797 	char *lvb;
2798 	struct ocfs2_lock_res *lockres = v;
2799 
2800 	if (!lockres)
2801 		return -EINVAL;
2802 
2803 	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2804 
2805 	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2806 		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2807 			   lockres->l_name,
2808 			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2809 	else
2810 		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2811 
2812 	seq_printf(m, "%d\t"
2813 		   "0x%lx\t"
2814 		   "0x%x\t"
2815 		   "0x%x\t"
2816 		   "%u\t"
2817 		   "%u\t"
2818 		   "%d\t"
2819 		   "%d\t",
2820 		   lockres->l_level,
2821 		   lockres->l_flags,
2822 		   lockres->l_action,
2823 		   lockres->l_unlock_action,
2824 		   lockres->l_ro_holders,
2825 		   lockres->l_ex_holders,
2826 		   lockres->l_requested,
2827 		   lockres->l_blocking);
2828 
2829 	/* Dump the raw LVB */
2830 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2831 	for (i = 0; i < DLM_LVB_LEN; i++)
2832 		seq_printf(m, "0x%x\t", lvb[i]);
2833 
2834 #ifdef CONFIG_OCFS2_FS_STATS
2835 # define lock_num_prmode(_l)		((_l)->l_lock_prmode.ls_gets)
2836 # define lock_num_exmode(_l)		((_l)->l_lock_exmode.ls_gets)
2837 # define lock_num_prmode_failed(_l)	((_l)->l_lock_prmode.ls_fail)
2838 # define lock_num_exmode_failed(_l)	((_l)->l_lock_exmode.ls_fail)
2839 # define lock_total_prmode(_l)		((_l)->l_lock_prmode.ls_total)
2840 # define lock_total_exmode(_l)		((_l)->l_lock_exmode.ls_total)
2841 # define lock_max_prmode(_l)		((_l)->l_lock_prmode.ls_max)
2842 # define lock_max_exmode(_l)		((_l)->l_lock_exmode.ls_max)
2843 # define lock_refresh(_l)		((_l)->l_lock_refresh)
2844 #else
2845 # define lock_num_prmode(_l)		(0)
2846 # define lock_num_exmode(_l)		(0)
2847 # define lock_num_prmode_failed(_l)	(0)
2848 # define lock_num_exmode_failed(_l)	(0)
2849 # define lock_total_prmode(_l)		(0ULL)
2850 # define lock_total_exmode(_l)		(0ULL)
2851 # define lock_max_prmode(_l)		(0)
2852 # define lock_max_exmode(_l)		(0)
2853 # define lock_refresh(_l)		(0)
2854 #endif
2855 	/* The following seq_print was added in version 2 of this output */
2856 	seq_printf(m, "%u\t"
2857 		   "%u\t"
2858 		   "%u\t"
2859 		   "%u\t"
2860 		   "%llu\t"
2861 		   "%llu\t"
2862 		   "%u\t"
2863 		   "%u\t"
2864 		   "%u\t",
2865 		   lock_num_prmode(lockres),
2866 		   lock_num_exmode(lockres),
2867 		   lock_num_prmode_failed(lockres),
2868 		   lock_num_exmode_failed(lockres),
2869 		   lock_total_prmode(lockres),
2870 		   lock_total_exmode(lockres),
2871 		   lock_max_prmode(lockres),
2872 		   lock_max_exmode(lockres),
2873 		   lock_refresh(lockres));
2874 
2875 	/* End the line */
2876 	seq_printf(m, "\n");
2877 	return 0;
2878 }
2879 
2880 static const struct seq_operations ocfs2_dlm_seq_ops = {
2881 	.start =	ocfs2_dlm_seq_start,
2882 	.stop =		ocfs2_dlm_seq_stop,
2883 	.next =		ocfs2_dlm_seq_next,
2884 	.show =		ocfs2_dlm_seq_show,
2885 };
2886 
2887 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2888 {
2889 	struct seq_file *seq = file->private_data;
2890 	struct ocfs2_dlm_seq_priv *priv = seq->private;
2891 	struct ocfs2_lock_res *res = &priv->p_iter_res;
2892 
2893 	ocfs2_remove_lockres_tracking(res);
2894 	ocfs2_put_dlm_debug(priv->p_dlm_debug);
2895 	return seq_release_private(inode, file);
2896 }
2897 
2898 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2899 {
2900 	int ret;
2901 	struct ocfs2_dlm_seq_priv *priv;
2902 	struct seq_file *seq;
2903 	struct ocfs2_super *osb;
2904 
2905 	priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2906 	if (!priv) {
2907 		ret = -ENOMEM;
2908 		mlog_errno(ret);
2909 		goto out;
2910 	}
2911 	osb = inode->i_private;
2912 	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2913 	priv->p_dlm_debug = osb->osb_dlm_debug;
2914 	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2915 
2916 	ret = seq_open(file, &ocfs2_dlm_seq_ops);
2917 	if (ret) {
2918 		kfree(priv);
2919 		mlog_errno(ret);
2920 		goto out;
2921 	}
2922 
2923 	seq = file->private_data;
2924 	seq->private = priv;
2925 
2926 	ocfs2_add_lockres_tracking(&priv->p_iter_res,
2927 				   priv->p_dlm_debug);
2928 
2929 out:
2930 	return ret;
2931 }
2932 
2933 static const struct file_operations ocfs2_dlm_debug_fops = {
2934 	.open =		ocfs2_dlm_debug_open,
2935 	.release =	ocfs2_dlm_debug_release,
2936 	.read =		seq_read,
2937 	.llseek =	seq_lseek,
2938 };
2939 
2940 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2941 {
2942 	int ret = 0;
2943 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2944 
2945 	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2946 							 S_IFREG|S_IRUSR,
2947 							 osb->osb_debug_root,
2948 							 osb,
2949 							 &ocfs2_dlm_debug_fops);
2950 	if (!dlm_debug->d_locking_state) {
2951 		ret = -EINVAL;
2952 		mlog(ML_ERROR,
2953 		     "Unable to create locking state debugfs file.\n");
2954 		goto out;
2955 	}
2956 
2957 	ocfs2_get_dlm_debug(dlm_debug);
2958 out:
2959 	return ret;
2960 }
2961 
2962 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2963 {
2964 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2965 
2966 	if (dlm_debug) {
2967 		debugfs_remove(dlm_debug->d_locking_state);
2968 		ocfs2_put_dlm_debug(dlm_debug);
2969 	}
2970 }
2971 
2972 int ocfs2_dlm_init(struct ocfs2_super *osb)
2973 {
2974 	int status = 0;
2975 	struct ocfs2_cluster_connection *conn = NULL;
2976 
2977 	if (ocfs2_mount_local(osb)) {
2978 		osb->node_num = 0;
2979 		goto local;
2980 	}
2981 
2982 	status = ocfs2_dlm_init_debug(osb);
2983 	if (status < 0) {
2984 		mlog_errno(status);
2985 		goto bail;
2986 	}
2987 
2988 	/* launch downconvert thread */
2989 	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2990 	if (IS_ERR(osb->dc_task)) {
2991 		status = PTR_ERR(osb->dc_task);
2992 		osb->dc_task = NULL;
2993 		mlog_errno(status);
2994 		goto bail;
2995 	}
2996 
2997 	/* for now, uuid == domain */
2998 	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
2999 				       osb->uuid_str,
3000 				       strlen(osb->uuid_str),
3001 				       &lproto, ocfs2_do_node_down, osb,
3002 				       &conn);
3003 	if (status) {
3004 		mlog_errno(status);
3005 		goto bail;
3006 	}
3007 
3008 	status = ocfs2_cluster_this_node(&osb->node_num);
3009 	if (status < 0) {
3010 		mlog_errno(status);
3011 		mlog(ML_ERROR,
3012 		     "could not find this host's node number\n");
3013 		ocfs2_cluster_disconnect(conn, 0);
3014 		goto bail;
3015 	}
3016 
3017 local:
3018 	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3019 	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3020 	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3021 	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3022 
3023 	osb->cconn = conn;
3024 
3025 	status = 0;
3026 bail:
3027 	if (status < 0) {
3028 		ocfs2_dlm_shutdown_debug(osb);
3029 		if (osb->dc_task)
3030 			kthread_stop(osb->dc_task);
3031 	}
3032 
3033 	return status;
3034 }
3035 
3036 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3037 			int hangup_pending)
3038 {
3039 	ocfs2_drop_osb_locks(osb);
3040 
3041 	/*
3042 	 * Now that we have dropped all locks and ocfs2_dismount_volume()
3043 	 * has disabled recovery, the DLM won't be talking to us.  It's
3044 	 * safe to tear things down before disconnecting the cluster.
3045 	 */
3046 
3047 	if (osb->dc_task) {
3048 		kthread_stop(osb->dc_task);
3049 		osb->dc_task = NULL;
3050 	}
3051 
3052 	ocfs2_lock_res_free(&osb->osb_super_lockres);
3053 	ocfs2_lock_res_free(&osb->osb_rename_lockres);
3054 	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3055 	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3056 
3057 	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3058 	osb->cconn = NULL;
3059 
3060 	ocfs2_dlm_shutdown_debug(osb);
3061 }
3062 
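/* Tear down a lockres' dlm lock completely. We wait out any in-flight
 * (BUSY) operations, push our LVB back if we held the lock EX with a
 * fully refreshed value, and then issue the final unlock, waiting for
 * the unlock ast before returning. */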
3063 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3064 			   struct ocfs2_lock_res *lockres)
3065 {
3066 	int ret;
3067 	unsigned long flags;
3068 	u32 lkm_flags = 0;
3069 
3070 	/* We didn't get anywhere near actually using this lockres. */
3071 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3072 		goto out;
3073 
3074 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3075 		lkm_flags |= DLM_LKF_VALBLK;
3076 
3077 	spin_lock_irqsave(&lockres->l_lock, flags);
3078 
3079 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3080 			"lockres %s, flags 0x%lx\n",
3081 			lockres->l_name, lockres->l_flags);
3082 
3083 	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3084 		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3085 		     "%u, unlock_action = %u\n",
3086 		     lockres->l_name, lockres->l_flags, lockres->l_action,
3087 		     lockres->l_unlock_action);
3088 
3089 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3090 
3091 		/* XXX: Today we just wait on any busy
3092 		 * locks... Perhaps we need to cancel converts in the
3093 		 * future? */
3094 		ocfs2_wait_on_busy_lock(lockres);
3095 
3096 		spin_lock_irqsave(&lockres->l_lock, flags);
3097 	}
3098 
3099 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3100 		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3101 		    lockres->l_level == DLM_LOCK_EX &&
3102 		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3103 			lockres->l_ops->set_lvb(lockres);
3104 	}
3105 
3106 	if (lockres->l_flags & OCFS2_LOCK_BUSY)
3107 		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3108 		     lockres->l_name);
3109 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3110 		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3111 
3112 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3113 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3114 		goto out;
3115 	}
3116 
3117 	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3118 
3119 	/* make sure we never get here while waiting for an ast to
3120 	 * fire. */
3121 	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3122 
3123 	/* is this necessary? */
3124 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3125 	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3126 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3127 
3128 	mlog(0, "lock %s\n", lockres->l_name);
3129 
3130 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3131 	if (ret) {
3132 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3133 		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3134 		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3135 		BUG();
3136 	}
3137 	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3138 	     lockres->l_name);
3139 
3140 	ocfs2_wait_on_busy_lock(lockres);
3141 out:
3142 	return 0;
3143 }
3144 
3145 /* Mark the lockres as being dropped. It will no longer be
3146  * queued if blocking, but we still may have to wait on it
3147  * being dequeued from the downconvert thread before we can consider
3148  * it safe to drop.
3149  *
3150  * You can *not* attempt to call cluster_lock on this lockres anymore. */
3151 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
3152 {
3153 	int status;
3154 	struct ocfs2_mask_waiter mw;
3155 	unsigned long flags;
3156 
3157 	ocfs2_init_mask_waiter(&mw);
3158 
3159 	spin_lock_irqsave(&lockres->l_lock, flags);
3160 	lockres->l_flags |= OCFS2_LOCK_FREEING;
3161 	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3162 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3163 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3164 
3165 		mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3166 
3167 		status = ocfs2_wait_for_mask(&mw);
3168 		if (status)
3169 			mlog_errno(status);
3170 
3171 		spin_lock_irqsave(&lockres->l_lock, flags);
3172 	}
3173 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3174 }
3175 
3176 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3177 			       struct ocfs2_lock_res *lockres)
3178 {
3179 	int ret;
3180 
3181 	ocfs2_mark_lockres_freeing(lockres);
3182 	ret = ocfs2_drop_lock(osb, lockres);
3183 	if (ret)
3184 		mlog_errno(ret);
3185 }
3186 
3187 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3188 {
3189 	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3190 	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3191 	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3192 	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3193 }
3194 
3195 int ocfs2_drop_inode_locks(struct inode *inode)
3196 {
3197 	int status, err;
3198 
3199 	/* No need to call ocfs2_mark_lockres_freeing here -
3200 	 * ocfs2_clear_inode has done it for us. */
3201 
3202 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3203 			      &OCFS2_I(inode)->ip_open_lockres);
3204 	if (err < 0)
3205 		mlog_errno(err);
3206 
3207 	status = err;
3208 
3209 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3210 			      &OCFS2_I(inode)->ip_inode_lockres);
3211 	if (err < 0)
3212 		mlog_errno(err);
3213 	if (err < 0 && !status)
3214 		status = err;
3215 
3216 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3217 			      &OCFS2_I(inode)->ip_rw_lockres);
3218 	if (err < 0)
3219 		mlog_errno(err);
3220 	if (err < 0 && !status)
3221 		status = err;
3222 
3223 	return status;
3224 }
3225 
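/* Set a lockres up for a downconvert to new_level. Must be called with
 * the lockres spinlock held; returns the pending generation to pass to
 * ocfs2_downconvert_lock(), which clears it once dlm_lock() has been
 * issued. */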
3226 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3227 					      int new_level)
3228 {
3229 	assert_spin_locked(&lockres->l_lock);
3230 
3231 	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3232 
3233 	if (lockres->l_level <= new_level) {
3234 		mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3235 		     "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3236 		     "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3237 		     new_level, list_empty(&lockres->l_blocked_list),
3238 		     list_empty(&lockres->l_mask_waiters), lockres->l_type,
3239 		     lockres->l_flags, lockres->l_ro_holders,
3240 		     lockres->l_ex_holders, lockres->l_action,
3241 		     lockres->l_unlock_action, lockres->l_requested,
3242 		     lockres->l_blocking, lockres->l_pending_gen);
3243 		BUG();
3244 	}
3245 
3246 	mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3247 	     lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3248 
3249 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
3250 	lockres->l_requested = new_level;
3251 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3252 	return lockres_set_pending(lockres);
3253 }
3254 
3255 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3256 				  struct ocfs2_lock_res *lockres,
3257 				  int new_level,
3258 				  int lvb,
3259 				  unsigned int generation)
3260 {
3261 	int ret;
3262 	u32 dlm_flags = DLM_LKF_CONVERT;
3263 
3264 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3265 	     lockres->l_level, new_level);
3266 
3267 	if (lvb)
3268 		dlm_flags |= DLM_LKF_VALBLK;
3269 
3270 	ret = ocfs2_dlm_lock(osb->cconn,
3271 			     new_level,
3272 			     &lockres->l_lksb,
3273 			     dlm_flags,
3274 			     lockres->l_name,
3275 			     OCFS2_LOCK_ID_MAX_LEN - 1);
3276 	lockres_clear_pending(lockres, generation, osb);
3277 	if (ret) {
3278 		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3279 		ocfs2_recover_from_dlm_error(lockres, 1);
3280 		goto bail;
3281 	}
3282 
3283 	ret = 0;
3284 bail:
3285 	return ret;
3286 }
3287 
3288 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3289 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3290 				        struct ocfs2_lock_res *lockres)
3291 {
3292 	assert_spin_locked(&lockres->l_lock);
3293 
3294 	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3295 		/* If we're already trying to cancel a lock conversion
3296 		 * then just drop the spinlock and allow the caller to
3297 		 * requeue this lock. */
3298 		mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3299 		return 0;
3300 	}
3301 
3302 	/* were we in a convert when we got the bast fire? */
3303 	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3304 	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
3305 	/* set things up for the unlockast to know to just
3306 	 * clear out the ast_action and unset busy, etc. */
3307 	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3308 
3309 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3310 			"lock %s, invalid flags: 0x%lx\n",
3311 			lockres->l_name, lockres->l_flags);
3312 
3313 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3314 
3315 	return 1;
3316 }
3317 
3318 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3319 				struct ocfs2_lock_res *lockres)
3320 {
3321 	int ret;
3322 
3323 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3324 			       DLM_LKF_CANCEL);
3325 	if (ret) {
3326 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3327 		ocfs2_recover_from_dlm_error(lockres, 0);
3328 	}
3329 
3330 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3331 
3332 	return ret;
3333 }
3334 
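/* Core of the downconvert thread's work on a single lockres: decide
 * whether the lock can be downconverted to a level compatible with
 * lockres->l_blocking right now. Sets ctl->requeue when it can't (busy,
 * still held, refreshing, etc.) and otherwise pushes the lock down,
 * running the type-specific downconvert_worker first if one exists. */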
3335 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3336 			      struct ocfs2_lock_res *lockres,
3337 			      struct ocfs2_unblock_ctl *ctl)
3338 {
3339 	unsigned long flags;
3340 	int blocking;
3341 	int new_level;
3342 	int level;
3343 	int ret = 0;
3344 	int set_lvb = 0;
3345 	unsigned int gen;
3346 
3347 	spin_lock_irqsave(&lockres->l_lock, flags);
3348 
3349 recheck:
3350 	/*
3351 	 * Is it still blocking? If not, we have no more work to do.
3352 	 */
3353 	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3354 		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3355 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3356 		ret = 0;
3357 		goto leave;
3358 	}
3359 
3360 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3361 		/* XXX
3362 		 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
3363 		 * exists entirely for one reason - another thread has set
3364 		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3365 		 *
3366 		 * If we do ocfs2_cancel_convert() before the other thread
3367 		 * calls dlm_lock(), our cancel will do nothing.  We will
3368 		 * get no ast, and we will have no way of knowing the
3369 		 * cancel failed.  Meanwhile, the other thread will call
3370 		 * into dlm_lock() and wait...forever.
3371 		 *
3372 		 * Why forever?  Because another node has asked for the
3373 		 * lock first; that's why we're here in unblock_lock().
3374 		 *
3375 		 * The solution is OCFS2_LOCK_PENDING.  When PENDING is
3376 		 * set, we just requeue the unblock.  Only when the other
3377 		 * thread has called dlm_lock() and cleared PENDING will
3378 		 * we then cancel their request.
3379 		 *
3380 		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3381 		 * at the same time they set OCFS2_LOCK_BUSY.  They must
3382 		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3383 		 */
3384 		if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3385 			mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3386 			     lockres->l_name);
3387 			goto leave_requeue;
3388 		}
3389 
3390 		ctl->requeue = 1;
3391 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
3392 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3393 		if (ret) {
3394 			ret = ocfs2_cancel_convert(osb, lockres);
3395 			if (ret < 0)
3396 				mlog_errno(ret);
3397 		}
3398 		goto leave;
3399 	}
3400 
3401 	/*
3402 	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3403 	 * set when the ast is received for an upconvert just before the
3404 	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3405 	 * on the heels of the ast, we want to delay the downconvert just
3406 	 * enough to allow the up requestor to do its task. Because this
3407 	 * lock is in the blocked queue, the lock will be downconverted
3408 	 * as soon as the requestor is done with the lock.
3409 	 */
3410 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3411 		goto leave_requeue;
3412 
3413 	/*
3414 	 * How can we block and yet be at NL?  We were trying to upconvert
3415 	 * from NL and got canceled.  The code comes back here, and now
3416 	 * we notice and clear BLOCKING.
3417 	 */
3418 	if (lockres->l_level == DLM_LOCK_NL) {
3419 		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3420 		mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3421 		lockres->l_blocking = DLM_LOCK_NL;
3422 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3423 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3424 		goto leave;
3425 	}
3426 
3427 	/* if we're blocking an exclusive and we have *any* holders,
3428 	 * then requeue. */
3429 	if ((lockres->l_blocking == DLM_LOCK_EX)
3430 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3431 		mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3432 		     lockres->l_name, lockres->l_ex_holders,
3433 		     lockres->l_ro_holders);
3434 		goto leave_requeue;
3435 	}
3436 
3437 	/* If it's a PR we're blocking, then only
3438 	 * requeue if we've got any EX holders */
3439 	if (lockres->l_blocking == DLM_LOCK_PR &&
3440 	    lockres->l_ex_holders) {
3441 		mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3442 		     lockres->l_name, lockres->l_ex_holders);
3443 		goto leave_requeue;
3444 	}
3445 
3446 	/*
3447 	 * Can we get a lock in this state if the holder counts are
3448 	 * zero? The metadata unblock code used to check this.
3449 	 */
3450 	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3451 	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3452 		mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3453 		     lockres->l_name);
3454 		goto leave_requeue;
3455 	}
3456 
3457 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3458 
3459 	if (lockres->l_ops->check_downconvert
3460 	    && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3461 		mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3462 		     lockres->l_name);
3463 		goto leave_requeue;
3464 	}
3465 
3466 	/* If we get here, then we know that there are no more
3467 	 * incompatible holders (and anyone asking for an incompatible
3468 	 * lock is blocked). We can now downconvert the lock */
3469 	if (!lockres->l_ops->downconvert_worker)
3470 		goto downconvert;
3471 
3472 	/* Some lockres types want to do a bit of work before
3473 	 * downconverting a lock. Allow that here. The worker function
3474 	 * may sleep, so we save off a copy of what we're blocking as
3475 	 * it may change while we're not holding the spin lock. */
3476 	blocking = lockres->l_blocking;
3477 	level = lockres->l_level;
3478 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3479 
3480 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3481 
3482 	if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3483 		mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3484 		     lockres->l_name);
3485 		goto leave;
3486 	}
3487 
3488 	spin_lock_irqsave(&lockres->l_lock, flags);
3489 	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3490 		/* If this changed underneath us, then we can't drop
3491 		 * it just yet. */
3492 		mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3493 		     "Recheck\n", lockres->l_name, blocking,
3494 		     lockres->l_blocking, level, lockres->l_level);
3495 		goto recheck;
3496 	}
3497 
3498 downconvert:
3499 	ctl->requeue = 0;
3500 
3501 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3502 		if (lockres->l_level == DLM_LOCK_EX)
3503 			set_lvb = 1;
3504 
3505 		/*
3506 		 * We only set the lvb if the lock has been fully
3507 		 * refreshed - otherwise we risk setting stale
3508 		 * data. When we skip setting it, there's no need to
3509 		 * clear out the lvb here as its value is still valid.
3510 		 */
3511 		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3512 			lockres->l_ops->set_lvb(lockres);
3513 	}
3514 
3515 	gen = ocfs2_prepare_downconvert(lockres, new_level);
3516 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3517 	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3518 				     gen);
3519 
3520 leave:
3521 	if (ret)
3522 		mlog_errno(ret);
3523 	return ret;
3524 
3525 leave_requeue:
3526 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3527 	ctl->requeue = 1;
3528 
3529 	return 0;
3530 }
3531 
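/*
 * Downconvert worker for inode data locks: unmap and write back dirty
 * pages, and drop the page cache entirely when another node wants EX.
 * Directories only have their ip_dir_lock_gen counter bumped.
 */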
3532 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3533 				     int blocking)
3534 {
3535 	struct inode *inode;
3536 	struct address_space *mapping;
3537 	struct ocfs2_inode_info *oi;
3538 
3539 	inode = ocfs2_lock_res_inode(lockres);
3540 	mapping = inode->i_mapping;
3541 
3542 	if (S_ISDIR(inode->i_mode)) {
3543 		oi = OCFS2_I(inode);
3544 		oi->ip_dir_lock_gen++;
3545 		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3546 		goto out;
3547 	}
3548 
3549 	if (!S_ISREG(inode->i_mode))
3550 		goto out;
3551 
3552 	/*
3553 	 * We need this before the filemap_fdatawrite() so that it can
3554 	 * transfer the dirty bit from the PTE to the
3555 	 * page. Unfortunately this means that even for EX->PR
3556 	 * downconverts, we'll lose our mappings and have to build
3557 	 * them up again.
3558 	 */
3559 	unmap_mapping_range(mapping, 0, 0, 0);
3560 
3561 	if (filemap_fdatawrite(mapping)) {
3562 		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3563 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
3564 	}
3565 	sync_mapping_buffers(mapping);
3566 	if (blocking == DLM_LOCK_EX) {
3567 		truncate_inode_pages(mapping, 0);
3568 	} else {
3569 		/* We only need to wait on the I/O if we're not also
3570 		 * truncating pages because truncate_inode_pages waits
3571 		 * for us above. We don't truncate pages if we're
3572 		 * blocking anything < EXMODE because we want to keep
3573 		 * them around in that case. */
3574 		filemap_fdatawait(mapping);
3575 	}
3576 
3577 out:
3578 	return UNBLOCK_CONTINUE;
3579 }
3580 
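/*
 * A metadata lock can only be downconverted once the journaled changes
 * behind it are checkpointed. Returns 1 when that is already the case;
 * otherwise kicks off a checkpoint and returns 0 so the caller requeues.
 */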
3581 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3582 				 struct ocfs2_lock_res *lockres,
3583 				 int new_level)
3584 {
3585 	int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3586 
3587 	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3588 	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3589 
3590 	if (checkpointed)
3591 		return 1;
3592 
3593 	ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3594 	return 0;
3595 }
3596 
3597 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3598 					int new_level)
3599 {
3600 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3601 
3602 	return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3603 }
3604 
3605 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3606 {
3607 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3608 
3609 	__ocfs2_stuff_meta_lvb(inode);
3610 }
3611 
3612 /*
3613  * Does the final reference drop on our dentry lock. Right now this
3614  * happens in the downconvert thread, but we could choose to simplify the
3615  * dlmglue API and push these off to the ocfs2_wq in the future.
3616  */
3617 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3618 				     struct ocfs2_lock_res *lockres)
3619 {
3620 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3621 	ocfs2_dentry_lock_put(osb, dl);
3622 }
3623 
3624 /*
3625  * d_delete() matching dentries before the lock downconvert.
3626  *
3627  * At this point, any process waiting to destroy the
3628  * dentry_lock due to last ref count is stopped by the
3629  * OCFS2_LOCK_QUEUED flag.
3630  *
3631  * We have two potential problems
3632  *
3633  * 1) If we do the last reference drop on our dentry_lock (via dput)
3634  *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
3635  *    the downconvert to finish. Instead we take an elevated
3636  *    reference and push the drop until after we've completed our
3637  *    unblock processing.
3638  *
3639  * 2) There might be another process with a final reference,
3640  *    waiting on us to finish processing. If this is the case, we
3641  *    detect it and exit out - there are no more dentries anyway.
3642  */
3643 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3644 				       int blocking)
3645 {
3646 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3647 	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3648 	struct dentry *dentry;
3649 	unsigned long flags;
3650 	int extra_ref = 0;
3651 
3652 	/*
3653 	 * This node is blocking another node from getting a read
3654 	 * lock. This happens when we've renamed within a
3655 	 * directory. We've forced the other nodes to d_delete(), but
3656 	 * we never actually dropped our lock because it's still
3657 	 * valid. The downconvert code will retain a PR for this node,
3658 	 * so there's no further work to do.
3659 	 */
3660 	if (blocking == DLM_LOCK_PR)
3661 		return UNBLOCK_CONTINUE;
3662 
3663 	/*
3664 	 * Mark this inode as potentially orphaned. The code in
3665 	 * ocfs2_delete_inode() will figure out whether it actually
3666 	 * needs to be freed or not.
3667 	 */
3668 	spin_lock(&oi->ip_lock);
3669 	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3670 	spin_unlock(&oi->ip_lock);
3671 
3672 	/*
3673 	 * Yuck. We need to make sure however that the check of
3674 	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3675 	 * respect to a reference decrement or the setting of that
3676 	 * flag.
3677 	 */
3678 	spin_lock_irqsave(&lockres->l_lock, flags);
3679 	spin_lock(&dentry_attach_lock);
3680 	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3681 	    && dl->dl_count) {
3682 		dl->dl_count++;
3683 		extra_ref = 1;
3684 	}
3685 	spin_unlock(&dentry_attach_lock);
3686 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3687 
3688 	mlog(0, "extra_ref = %d\n", extra_ref);
3689 
3690 	/*
3691 	 * We have a process waiting on us in ocfs2_dentry_iput(),
3692 	 * which means we can't have any more outstanding
3693 	 * aliases. There's no need to do any more work.
3694 	 */
3695 	if (!extra_ref)
3696 		return UNBLOCK_CONTINUE;
3697 
3698 	spin_lock(&dentry_attach_lock);
3699 	while (1) {
3700 		dentry = ocfs2_find_local_alias(dl->dl_inode,
3701 						dl->dl_parent_blkno, 1);
3702 		if (!dentry)
3703 			break;
3704 		spin_unlock(&dentry_attach_lock);
3705 
3706 		mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3707 		     dentry->d_name.name);
3708 
3709 		/*
3710 		 * The following dcache calls may do an
3711 		 * iput(). Normally we don't want that from the
3712 		 * downconverting thread, but in this case it's ok
3713 		 * because the requesting node already has an
3714 		 * exclusive lock on the inode, so it can't be queued
3715 		 * for a downconvert.
3716 		 */
3717 		d_delete(dentry);
3718 		dput(dentry);
3719 
3720 		spin_lock(&dentry_attach_lock);
3721 	}
3722 	spin_unlock(&dentry_attach_lock);
3723 
3724 	/*
3725 	 * If we are the last holder of this dentry lock, there is no
3726 	 * reason to downconvert so skip straight to the unlock.
3727 	 */
3728 	if (dl->dl_count == 1)
3729 		return UNBLOCK_STOP_POST;
3730 
3731 	return UNBLOCK_CONTINUE_POST;
3732 }
3733 
3734 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3735 					    int new_level)
3736 {
3737 	struct ocfs2_refcount_tree *tree =
3738 				ocfs2_lock_res_refcount_tree(lockres);
3739 
3740 	return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3741 }
3742 
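/*
 * Refcount tree downconvert worker: throw away our cached copies of the
 * tree's metadata before another node takes the lock.
 */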
3743 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3744 					 int blocking)
3745 {
3746 	struct ocfs2_refcount_tree *tree =
3747 				ocfs2_lock_res_refcount_tree(lockres);
3748 
3749 	ocfs2_metadata_cache_purge(&tree->rf_ci);
3750 
3751 	return UNBLOCK_CONTINUE;
3752 }
3753 
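/*
 * Pack the current global quota info into the LVB so other nodes can pick
 * it up without re-reading the quota file.
 */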
3754 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3755 {
3756 	struct ocfs2_qinfo_lvb *lvb;
3757 	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3758 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3759 					    oinfo->dqi_gi.dqi_type);
3760 
3761 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3762 	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3763 	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3764 	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3765 	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3766 	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3767 	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3768 	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3769 }
3770 
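/*
 * Drop the cluster lock on the global quota info; a no-op on local or
 * hard read-only mounts.
 */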
3771 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3772 {
3773 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3774 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3775 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3776 
3777 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3778 		ocfs2_cluster_unlock(osb, lockres, level);
3779 }
3780 
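/*
 * Re-read global quota info after getting the lock: from the LVB when it
 * is valid, otherwise from the quota file on disk.
 */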
3781 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3782 {
3783 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3784 					    oinfo->dqi_gi.dqi_type);
3785 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3786 	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3787 	struct buffer_head *bh = NULL;
3788 	struct ocfs2_global_disk_dqinfo *gdinfo;
3789 	int status = 0;
3790 
3791 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3792 	    lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3793 		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3794 		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3795 		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3796 		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3797 		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3798 		oinfo->dqi_gi.dqi_free_entry =
3799 					be32_to_cpu(lvb->lvb_free_entry);
3800 	} else {
3801 		status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3802 						     oinfo->dqi_giblk, &bh);
3803 		if (status) {
3804 			mlog_errno(status);
3805 			goto bail;
3806 		}
3807 		gdinfo = (struct ocfs2_global_disk_dqinfo *)
3808 					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3809 		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3810 		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3811 		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3812 		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3813 		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3814 		oinfo->dqi_gi.dqi_free_entry =
3815 					le32_to_cpu(gdinfo->dqi_free_entry);
3816 		brelse(bh);
3817 		ocfs2_track_lock_refresh(lockres);
3818 	}
3819 
3820 bail:
3821 	return status;
3822 }
3823 
3824 /* Lock quota info. This function expects at least a shared lock on the quota
3825  * file so that we can safely refresh quota info from disk. */
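/*
 * Typical call pattern (sketch): take the lock, use the refreshed info,
 * then drop it at the same exclusivity level:
 *
 *	status = ocfs2_qinfo_lock(oinfo, ex);
 *	if (!status) {
 *		...use the refreshed oinfo / sb_dqinfo() fields...
 *		ocfs2_qinfo_unlock(oinfo, ex);
 *	}
 */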
3826 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3827 {
3828 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3829 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3830 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3831 	int status = 0;
3832 
3833 	/* On RO devices, locking really isn't needed... */
3834 	if (ocfs2_is_hard_readonly(osb)) {
3835 		if (ex)
3836 			status = -EROFS;
3837 		goto bail;
3838 	}
3839 	if (ocfs2_mount_local(osb))
3840 		goto bail;
3841 
3842 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3843 	if (status < 0) {
3844 		mlog_errno(status);
3845 		goto bail;
3846 	}
3847 	if (!ocfs2_should_refresh_lock_res(lockres))
3848 		goto bail;
3849 	/* OK, we have the lock but we need to refresh the quota info */
3850 	status = ocfs2_refresh_qinfo(oinfo);
3851 	if (status)
3852 		ocfs2_qinfo_unlock(oinfo, ex);
3853 	ocfs2_complete_lock_res_refresh(lockres, status);
3854 bail:
3855 	return status;
3856 }
3857 
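/*
 * Take the cluster lock on a refcount tree: -EROFS on hard read-only
 * media, a no-op on local mounts, otherwise a plain cluster lock call.
 */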
3858 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3859 {
3860 	int status;
3861 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3862 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3863 	struct ocfs2_super *osb = lockres->l_priv;
3864 
3865 
3866 	if (ocfs2_is_hard_readonly(osb))
3867 		return -EROFS;
3868 
3869 	if (ocfs2_mount_local(osb))
3870 		return 0;
3871 
3872 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3873 	if (status < 0)
3874 		mlog_errno(status);
3875 
3876 	return status;
3877 }
3878 
3879 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3880 {
3881 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3882 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3883 	struct ocfs2_super *osb = lockres->l_priv;
3884 
3885 	if (!ocfs2_mount_local(osb))
3886 		ocfs2_cluster_unlock(osb, lockres, level);
3887 }
3888 
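/*
 * Called from the downconvert thread for each queued lockres: run
 * ocfs2_unblock_lock(), requeue if asked to, and fire the per-type
 * post_unlock callback when the worker requested it.
 */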
3889 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3890 				       struct ocfs2_lock_res *lockres)
3891 {
3892 	int status;
3893 	struct ocfs2_unblock_ctl ctl = {0, 0,};
3894 	unsigned long flags;
3895 
3896 	/* Our reference to the lockres in this function can be
3897 	 * considered valid until we remove the OCFS2_LOCK_QUEUED
3898 	 * flag. */
3899 
3900 	BUG_ON(!lockres);
3901 	BUG_ON(!lockres->l_ops);
3902 
3903 	mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
3904 
3905 	/* Detect whether a lock has been marked as going away while
3906 	 * the downconvert thread was processing other things. A lock can
3907 	 * still be marked with OCFS2_LOCK_FREEING after this check,
3908 	 * but short circuiting here will still save us some
3909 	 * performance. */
3910 	spin_lock_irqsave(&lockres->l_lock, flags);
3911 	if (lockres->l_flags & OCFS2_LOCK_FREEING)
3912 		goto unqueue;
3913 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3914 
3915 	status = ocfs2_unblock_lock(osb, lockres, &ctl);
3916 	if (status < 0)
3917 		mlog_errno(status);
3918 
3919 	spin_lock_irqsave(&lockres->l_lock, flags);
3920 unqueue:
3921 	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue)
3922 		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3923 	else
3924 		ocfs2_schedule_blocked_lock(osb, lockres);
3925 
3926 	mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
3927 	     ctl.requeue ? "yes" : "no");
3928 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3929 
3930 	if (ctl.unblock_action != UNBLOCK_CONTINUE
3931 	    && lockres->l_ops->post_unlock)
3932 		lockres->l_ops->post_unlock(osb, lockres);
3933 }
3934 
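/*
 * Queue a blocked lockres for the downconvert thread. Caller must hold
 * lockres->l_lock; locks already on their way to destruction are skipped.
 */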
3935 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3936 					struct ocfs2_lock_res *lockres)
3937 {
3938 	unsigned long flags;
3939 
3940 	assert_spin_locked(&lockres->l_lock);
3941 
3942 	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3943 		/* Do not schedule a lock for downconvert when it's on
3944 		 * the way to destruction - any nodes wanting access
3945 		 * to the resource will get it soon. */
3946 		mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
3947 		     lockres->l_name, lockres->l_flags);
3948 		return;
3949 	}
3950 
3951 	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3952 
3953 	spin_lock_irqsave(&osb->dc_task_lock, flags);
3954 	if (list_empty(&lockres->l_blocked_list)) {
3955 		list_add_tail(&lockres->l_blocked_list,
3956 			      &osb->blocked_lock_list);
3957 		osb->blocked_lock_count++;
3958 	}
3959 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3960 }
3961 
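/*
 * Drain the blocked lock list. The wake sequence is sampled up front so a
 * wakeup that arrives mid-run makes ocfs2_downconvert_thread loop again.
 */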
3962 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3963 {
3964 	unsigned long processed;
3965 	unsigned long flags;
3966 	struct ocfs2_lock_res *lockres;
3967 
3968 	spin_lock_irqsave(&osb->dc_task_lock, flags);
3969 	/* grab this early so we know to try again if a state change and
3970 	 * wake happens part-way through our work  */
3971 	osb->dc_work_sequence = osb->dc_wake_sequence;
3972 
3973 	processed = osb->blocked_lock_count;
3974 	while (processed) {
3975 		BUG_ON(list_empty(&osb->blocked_lock_list));
3976 
3977 		lockres = list_entry(osb->blocked_lock_list.next,
3978 				     struct ocfs2_lock_res, l_blocked_list);
3979 		list_del_init(&lockres->l_blocked_list);
3980 		osb->blocked_lock_count--;
3981 		spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3982 
3983 		BUG_ON(!processed);
3984 		processed--;
3985 
3986 		ocfs2_process_blocked_lock(osb, lockres);
3987 
3988 		spin_lock_irqsave(&osb->dc_task_lock, flags);
3989 	}
3990 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3991 }
3992 
3993 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3994 {
3995 	int empty = 0;
3996 	unsigned long flags;
3997 
3998 	spin_lock_irqsave(&osb->dc_task_lock, flags);
3999 	if (list_empty(&osb->blocked_lock_list))
4000 		empty = 1;
4001 
4002 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4003 	return empty;
4004 }
4005 
4006 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4007 {
4008 	int should_wake = 0;
4009 	unsigned long flags;
4010 
4011 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4012 	if (osb->dc_work_sequence != osb->dc_wake_sequence)
4013 		should_wake = 1;
4014 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4015 
4016 	return should_wake;
4017 }
4018 
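/*
 * Main loop of the per-mount downconvert kthread: sleep until woken, then
 * process whatever has been queued; only exit once asked to stop and the
 * blocked list is empty.
 */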
4019 static int ocfs2_downconvert_thread(void *arg)
4020 {
4021 	int status = 0;
4022 	struct ocfs2_super *osb = arg;
4023 
4024 	/* only quit once we've been asked to stop and there is no more
4025 	 * work available */
4026 	while (!(kthread_should_stop() &&
4027 		ocfs2_downconvert_thread_lists_empty(osb))) {
4028 
4029 		wait_event_interruptible(osb->dc_event,
4030 					 ocfs2_downconvert_thread_should_wake(osb) ||
4031 					 kthread_should_stop());
4032 
4033 		mlog(0, "downconvert_thread: awoken\n");
4034 
4035 		ocfs2_downconvert_thread_do_work(osb);
4036 	}
4037 
4038 	osb->dc_task = NULL;
4039 	return status;
4040 }
4041 
4042 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4043 {
4044 	unsigned long flags;
4045 
4046 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4047 	/* make sure the downconvert thread gets a swipe at whatever changes
4048 	 * the caller may have made to the lock state */
4049 	osb->dc_wake_sequence++;
4050 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4051 	wake_up(&osb->dc_event);
4052 }
4053