xref: /linux/fs/xfs/xfs_dquot_item.c (revision 3ad0876554cafa368f574d4d408468510543e9ff)
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"

static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * Returns the number of iovecs and bytes needed to log the given dquot item.
 */
STATIC void
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 2;
	*nbytes += sizeof(struct xfs_dq_logformat) +
		   sizeof(struct xfs_disk_dquot);
}

/*
 * Fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_dq_logformat	*qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
	qlf->qlf_type = XFS_LI_DQUOT;
	qlf->qlf_size = 2;
	qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id);
	qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
	qlf->qlf_len = 1;
	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
			&qlip->qli_dquot->q_core,
			sizeof(struct xfs_disk_dquot));
}
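
/*
 * Illustrative sketch (not taken from this file): ->iop_size() above reserves
 * space for exactly the two regions that ->iop_format() emits, so log
 * recovery sees a dquot item shaped roughly like this (the region accessor
 * below is hypothetical, for illustration only):
 *
 *	struct xfs_dq_logformat	*qlf = item_region(0);	// header, qlf_size == 2
 *	struct xfs_disk_dquot	*ddq = item_region(1);	// on-disk dquot image
 *
 * Recovery uses qlf->qlf_blkno and qlf->qlf_boffset to locate the dquot
 * within its buffer and copies the logged xfs_disk_dquot over it.
 */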

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}

STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so the latest copy _is_ the only one that matters.
	 */
	return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
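
/*
 * Illustrative caller sketch (an approximation of the dquot writeback path,
 * not code from this file): a flusher holds the dquot lock, waits out any
 * pin, and only then takes the flush lock and writes the dquot back:
 *
 *	xfs_dqlock(dqp);
 *	xfs_qm_dqunpin_wait(dqp);		// may force the log
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);	// requires the flush lock
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */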

/*
 * Callback used to mark a buffer with XFS_LI_FAILED when items in the
 * buffer have failed during writeback.
 *
 * This informs the AIL that the dquot is already flush locked on the next
 * push, and acquires a hold on the buffer to ensure that it isn't reclaimed
 * before dirty data makes it to disk.
 */
STATIC void
xfs_dquot_item_error(
	struct xfs_log_item	*lip,
	struct xfs_buf		*bp)
{
	ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
	xfs_set_li_failed(lip, bp);
}
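
/*
 * For context, a rough sketch (not code from this file) of how ->iop_error()
 * gets invoked: when a write of the dquot buffer fails and is going to be
 * retried, buffer I/O completion walks the log items attached to the buffer
 * and marks each one failed, roughly:
 *
 *	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
 *		lip->li_ops->iop_error(lip, bp);
 *
 * The XFS_LI_FAILED state set here is what ->iop_push() below checks in
 * order to resubmit the buffer.
 */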

STATIC uint
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	/*
	 * The buffer containing this item failed to be written back
	 * previously. Resubmit the buffer for IO.
	 */
	if (lip->li_flags & XFS_LI_FAILED) {
		if (!xfs_buf_trylock(bp))
			return XFS_ITEM_LOCKED;

		if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;

		xfs_buf_unlock(bp);
		return rval;
	}

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	/*
	 * Re-check the pincount now that we stabilized the value by
	 * taking the quota lock.
	 */
	if (atomic_read(&dqp->q_pincount) > 0) {
		rval = XFS_ITEM_PINNED;
		goto out_unlock;
	}

	/*
	 * Someone else is already flushing the dquot.  Nothing we can do
	 * here but wait for the flush to finish and remove the item from
	 * the AIL.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		rval = XFS_ITEM_FLUSHING;
		goto out_unlock;
	}

	spin_unlock(&lip->li_ailp->ail_lock);

	error = xfs_qm_dqflush(dqp, &bp);
	if (error) {
		xfs_warn(dqp->q_mount, "%s: push error %d on dqp "PTR_FMT,
			__func__, error, dqp);
	} else {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	}

	spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
	xfs_dqunlock(dqp);
	return rval;
}
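
/*
 * For context, a simplified sketch (not the actual xfsaild code) of how the
 * AIL push worker reacts to the return values above:
 *
 *	switch (lip->li_ops->iop_push(lip, &buffer_list)) {
 *	case XFS_ITEM_SUCCESS:	// dquot buffer queued; delwri list submitted later
 *	case XFS_ITEM_PINNED:	// needs a log force before it can be flushed
 *	case XFS_ITEM_LOCKED:	// dquot or buffer lock contended; retry later
 *	case XFS_ITEM_FLUSHING:	// flush already in progress; wait for the I/O
 *		break;
 *	}
 */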

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction.  Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

/*
 * This needs to stamp an lsn into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector for dquots
 */
static const struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_committing = xfs_qm_dquot_logitem_committing,
	.iop_error	= xfs_dquot_item_error
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
					&xfs_dquot_item_ops);
	lp->qli_dquot = dqp;
}
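
/*
 * Illustrative call site (an approximation of the dquot allocation path in
 * xfs_dquot.c, not code from this file): the log item is set up once, when
 * the in-core dquot is first constructed:
 *
 *	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
 *	dqp->dq_flags = type;
 *	dqp->q_core.d_id = cpu_to_be32(id);
 *	dqp->q_mount = mp;
 *	...
 *	xfs_qm_dquot_logitem_init(dqp);
 */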

/*------------------  QUOTAOFF LOG ITEMS  -------------------*/

static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
STATIC void
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_qoff_logitem);
}

STATIC void
xfs_qm_qoff_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_qoff_logformat *qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF);
	qlf->qf_type = XFS_LI_QUOTAOFF;
	qlf->qf_size = 1;
	qlf->qf_flags = qflip->qql_flags;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem));
}

/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * There isn't much you can do to push a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC uint
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing to do when the
 * transaction releases them.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);

	kmem_free(qfs->qql_item.li_lv_shadow);
	kmem_free(lip->li_lv_shadow);
	kmem_free(qfs);
	kmem_free(qfe);
	return (xfs_lsn_t)-1;
}
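
/*
 * For reference, the quotaoff sequence that produces this start/end pair,
 * roughly (a sketch of the quotaoff path in xfs_qm_syscalls.c, not code
 * from this file):
 *
 *	qoffstart = xfs_qm_qoff_logitem_init(mp, NULL, flags);
 *	// ... logged and left in the AIL while dquots are flushed ...
 *	qoffend = xfs_qm_qoff_logitem_init(mp, qoffstart, flags);
 *	// ... logged; once it reaches the disk, the ->iop_committed
 *	//     callback above removes the start item and frees both.
 */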

/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	qf->qql_item.li_mountp = mp;
	qf->qql_start_lip = start;
	qf->qql_flags = flags;
	return qf;
}