xref: /linux/fs/xfs/xfs_bmap_util.c (revision ab520be8cd5d56867fc95cfbc34b90880faf1f9d)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_defer.h"
29 #include "xfs_inode.h"
30 #include "xfs_btree.h"
31 #include "xfs_trans.h"
32 #include "xfs_extfree_item.h"
33 #include "xfs_alloc.h"
34 #include "xfs_bmap.h"
35 #include "xfs_bmap_util.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_rtalloc.h"
38 #include "xfs_error.h"
39 #include "xfs_quota.h"
40 #include "xfs_trans_space.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_log.h"
44 #include "xfs_rmap_btree.h"
45 #include "xfs_iomap.h"
46 #include "xfs_reflink.h"
47 #include "xfs_refcount.h"
48 
49 /* Kernel-only BMAP-related definitions and functions */
50 
51 /*
52  * Convert the given file system block to a disk block.  We have to treat it
53  * differently based on whether the file is a realtime file or not, because the
54  * bmap code does.
55  */
56 xfs_daddr_t
57 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
58 {
59 	return (XFS_IS_REALTIME_INODE(ip) ?
60 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
61 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
62 }
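/*
 * Usage sketch (illustrative only, not part of this file): mapping the
 * start of a mapping obtained from xfs_bmapi_read() to a 512-byte disk
 * address before issuing block-layer I/O:
 *
 *	xfs_daddr_t	daddr = xfs_fsb_to_db(ip, irec.br_startblock);
 *
 * For a realtime inode this is a plain units conversion (FSB to BB);
 * for a data-device inode it also decodes the AG number and offset
 * packed into the fsblock number.
 */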
63 
64 /*
65  * Routine to zero an extent on disk allocated to the specific inode.
66  *
67  * The VFS functions take a linearised filesystem block offset, so we have to
68  * convert the sparse xfs fsb to the right format first.
69  * VFS types are real funky, too.
70  */
71 int
72 xfs_zero_extent(
73 	struct xfs_inode *ip,
74 	xfs_fsblock_t	start_fsb,
75 	xfs_off_t	count_fsb)
76 {
77 	struct xfs_mount *mp = ip->i_mount;
78 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
79 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
80 
81 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
82 		block << (mp->m_super->s_blocksize_bits - 9),
83 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
84 		GFP_NOFS, true);
85 }
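/*
 * Usage sketch (hypothetical caller): zeroing a freshly allocated
 * written extent so it cannot expose stale disk contents:
 *
 *	error = xfs_zero_extent(ip, imap.br_startblock, imap.br_blockcount);
 *	if (error)
 *		return error;
 *
 * blkdev_issue_zeroout() waits for the zeroing to complete, so this
 * must not be called from contexts that cannot block.
 */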
86 
87 int
88 xfs_bmap_rtalloc(
89 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
90 {
91 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
92 	int		error;		/* error return value */
93 	xfs_mount_t	*mp;		/* mount point structure */
94 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
95 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
96 	xfs_extlen_t	align;		/* minimum allocation alignment */
97 	xfs_rtblock_t	rtb;
98 
99 	mp = ap->ip->i_mount;
100 	align = xfs_get_extsz_hint(ap->ip);
101 	prod = align / mp->m_sb.sb_rextsize;
102 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
103 					align, 1, ap->eof, 0,
104 					ap->conv, &ap->offset, &ap->length);
105 	if (error)
106 		return error;
107 	ASSERT(ap->length);
108 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
109 
110 	/*
111 	 * If the offset & length are not perfectly aligned
112 	 * then kill prod, it will just get us in trouble.
113 	 */
114 	if (do_mod(ap->offset, align) || ap->length % align)
115 		prod = 1;
116 	/*
117 	 * Set ralen to be the actual requested length in rtextents.
118 	 */
119 	ralen = ap->length / mp->m_sb.sb_rextsize;
120 	/*
121 	 * If the old value was close enough to MAXEXTLEN that
122 	 * we rounded up to it, cut it back so it's valid again.
123 	 * Note that if it's a really large request (bigger than
124 	 * MAXEXTLEN), we don't hear about that number, and can't
125 	 * adjust the starting point to match it.
126 	 */
127 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
128 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
129 
130 	/*
131 	 * Lock out modifications to both the RT bitmap and summary inodes
132 	 */
133 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
134 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
135 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
136 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
137 
138 	/*
139 	 * If it's an allocation to an empty file at offset 0,
140 	 * pick an extent that will space things out in the rt area.
141 	 */
142 	if (ap->eof && ap->offset == 0) {
143 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
144 
145 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
146 		if (error)
147 			return error;
148 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
149 	} else {
150 		ap->blkno = 0;
151 	}
152 
153 	xfs_bmap_adjacent(ap);
154 
155 	/*
156 	 * Realtime allocation, done through xfs_rtallocate_extent.
157 	 */
158 	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
159 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
160 	rtb = ap->blkno;
161 	ap->length = ralen;
162 	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
163 				&ralen, atype, ap->wasdel, prod, &rtb)))
164 		return error;
165 	if (rtb == NULLFSBLOCK && prod > 1 &&
166 	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
167 					   ap->length, &ralen, atype,
168 					   ap->wasdel, 1, &rtb)))
169 		return error;
170 	ap->blkno = rtb;
171 	if (ap->blkno != NULLFSBLOCK) {
172 		ap->blkno *= mp->m_sb.sb_rextsize;
173 		ralen *= mp->m_sb.sb_rextsize;
174 		ap->length = ralen;
175 		ap->ip->i_d.di_nblocks += ralen;
176 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
177 		if (ap->wasdel)
178 			ap->ip->i_delayed_blks -= ralen;
179 		/*
180 		 * Adjust the disk quota also. This was reserved
181 		 * earlier.
182 		 */
183 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
184 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
185 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
186 
187 		/* Zero the extent if we were asked to do so */
188 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
189 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
190 			if (error)
191 				return error;
192 		}
193 	} else {
194 		ap->length = 0;
195 	}
196 	return 0;
197 }
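/*
 * Worked example of the unit handling above (hypothetical numbers):
 * with sb_rextsize = 4 fsblocks and an aligned request of ap->length =
 * 16 fsblocks, ralen = 16 / 4 = 4 realtime extents are requested from
 * xfs_rtallocate_extent(), and on success both ap->blkno and ralen are
 * scaled back up by sb_rextsize so the result is returned in fsblock
 * units.
 */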
198 
199 /*
200  * Check if the endoff is outside the last extent. If so the caller will grow
201  * the allocation to a stripe unit boundary.  All offsets are considered outside
202  * the end of file for an empty fork, so 1 is returned in *eof in that case.
203  */
204 int
205 xfs_bmap_eof(
206 	struct xfs_inode	*ip,
207 	xfs_fileoff_t		endoff,
208 	int			whichfork,
209 	int			*eof)
210 {
211 	struct xfs_bmbt_irec	rec;
212 	int			error;
213 
214 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
215 	if (error || *eof)
216 		return error;
217 
218 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
219 	return 0;
220 }
221 
222 /*
223  * Extent tree block counting routines.
224  */
225 
226 /*
227  * Count leaf blocks given a range of extent records.
228  */
229 STATIC void
230 xfs_bmap_count_leaves(
231 	xfs_ifork_t		*ifp,
232 	xfs_extnum_t		idx,
233 	int			numrecs,
234 	int			*count)
235 {
236 	int		b;
237 
238 	for (b = 0; b < numrecs; b++) {
239 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
240 		*count += xfs_bmbt_get_blockcount(frp);
241 	}
242 }
243 
244 /*
245  * Count leaf blocks given a range of extent records originally
246  * in btree format.
247  */
248 STATIC void
249 xfs_bmap_disk_count_leaves(
250 	struct xfs_mount	*mp,
251 	struct xfs_btree_block	*block,
252 	int			numrecs,
253 	int			*count)
254 {
255 	int		b;
256 	xfs_bmbt_rec_t	*frp;
257 
258 	for (b = 1; b <= numrecs; b++) {
259 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
260 		*count += xfs_bmbt_disk_get_blockcount(frp);
261 	}
262 }
263 
264 /*
265  * Recursively walks each level of a btree
266  * to count total fsblocks in use.
267  */
268 STATIC int                                     /* error */
269 xfs_bmap_count_tree(
270 	xfs_mount_t     *mp,            /* file system mount point */
271 	xfs_trans_t     *tp,            /* transaction pointer */
272 	xfs_ifork_t	*ifp,		/* inode fork pointer */
273 	xfs_fsblock_t   blockno,	/* file system block number */
274 	int             levelin,	/* level in btree */
275 	int		*count)		/* Count of blocks */
276 {
277 	int			error;
278 	xfs_buf_t		*bp, *nbp;
279 	int			level = levelin;
280 	__be64			*pp;
281 	xfs_fsblock_t           bno = blockno;
282 	xfs_fsblock_t		nextbno;
283 	struct xfs_btree_block	*block, *nextblock;
284 	int			numrecs;
285 
286 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
287 						&xfs_bmbt_buf_ops);
288 	if (error)
289 		return error;
290 	*count += 1;
291 	block = XFS_BUF_TO_BLOCK(bp);
292 
293 	if (--level) {
294 		/* Not at node above leaves, count this level of nodes */
295 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
296 		while (nextbno != NULLFSBLOCK) {
297 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
298 						XFS_BMAP_BTREE_REF,
299 						&xfs_bmbt_buf_ops);
300 			if (error)
301 				return error;
302 			*count += 1;
303 			nextblock = XFS_BUF_TO_BLOCK(nbp);
304 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
305 			xfs_trans_brelse(tp, nbp);
306 		}
307 
308 		/* Dive to the next level */
309 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
310 		bno = be64_to_cpu(*pp);
311 		if (unlikely((error =
312 		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
313 			xfs_trans_brelse(tp, bp);
314 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
315 					 XFS_ERRLEVEL_LOW, mp);
316 			return -EFSCORRUPTED;
317 		}
318 		xfs_trans_brelse(tp, bp);
319 	} else {
320 		/* count all level 1 nodes and their leaves */
321 		for (;;) {
322 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
323 			numrecs = be16_to_cpu(block->bb_numrecs);
324 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
325 			xfs_trans_brelse(tp, bp);
326 			if (nextbno == NULLFSBLOCK)
327 				break;
328 			bno = nextbno;
329 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
330 						XFS_BMAP_BTREE_REF,
331 						&xfs_bmbt_buf_ops);
332 			if (error)
333 				return error;
334 			*count += 1;
335 			block = XFS_BUF_TO_BLOCK(bp);
336 		}
337 	}
338 	return 0;
339 }
340 
341 /*
342  * Count fsblocks of the given fork.
343  */
344 static int					/* error */
345 xfs_bmap_count_blocks(
346 	xfs_trans_t		*tp,		/* transaction pointer */
347 	xfs_inode_t		*ip,		/* incore inode */
348 	int			whichfork,	/* data or attr fork */
349 	int			*count)		/* out: count of blocks */
350 {
351 	struct xfs_btree_block	*block;	/* current btree block */
352 	xfs_fsblock_t		bno;	/* block # of "block" */
353 	xfs_ifork_t		*ifp;	/* fork structure */
354 	int			level;	/* btree level, for checking */
355 	xfs_mount_t		*mp;	/* file system mount structure */
356 	__be64			*pp;	/* pointer to block address */
357 
358 	bno = NULLFSBLOCK;
359 	mp = ip->i_mount;
360 	ifp = XFS_IFORK_PTR(ip, whichfork);
361 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
362 		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
363 		return 0;
364 	}
365 
366 	/*
367 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
368 	 */
369 	block = ifp->if_broot;
370 	level = be16_to_cpu(block->bb_level);
371 	ASSERT(level > 0);
372 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
373 	bno = be64_to_cpu(*pp);
374 	ASSERT(bno != NULLFSBLOCK);
375 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
376 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
377 
378 	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
379 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
380 				 mp);
381 		return -EFSCORRUPTED;
382 	}
383 
384 	return 0;
385 }
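/*
 * Usage sketch: counting the fsblocks backing an attribute fork, as
 * done by xfs_swap_extent_forks() later in this file:
 *
 *	int	aforkblks = 0;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
 *	if (error)
 *		return error;
 *
 * An extent-format fork is counted from the in-core extent list; a
 * btree-format fork requires reading and walking every bmbt level.
 */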
386 
387 /*
388  * returns 1 for success, 0 if we failed to map the extent.
389  */
390 STATIC int
391 xfs_getbmapx_fix_eof_hole(
392 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
393 	int			whichfork,
394 	struct getbmapx		*out,		/* output structure */
395 	int			prealloced,	/* this is a file with
396 						 * preallocated data space */
397 	__int64_t		end,		/* last block requested */
398 	xfs_fsblock_t		startblock,
399 	bool			moretocome)
400 {
401 	__int64_t		fixlen;
402 	xfs_mount_t		*mp;		/* file system mount point */
403 	xfs_ifork_t		*ifp;		/* inode fork pointer */
404 	xfs_extnum_t		lastx;		/* last extent pointer */
405 	xfs_fileoff_t		fileblock;
406 
407 	if (startblock == HOLESTARTBLOCK) {
408 		mp = ip->i_mount;
409 		out->bmv_block = -1;
410 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
411 		fixlen -= out->bmv_offset;
412 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
413 			/* Came to hole at EOF. Trim it. */
414 			if (fixlen <= 0)
415 				return 0;
416 			out->bmv_length = fixlen;
417 		}
418 	} else {
419 		if (startblock == DELAYSTARTBLOCK)
420 			out->bmv_block = -2;
421 		else
422 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
423 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
424 		ifp = XFS_IFORK_PTR(ip, whichfork);
425 		if (!moretocome &&
426 		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
427 		   (lastx == xfs_iext_count(ifp) - 1))
428 			out->bmv_oflags |= BMV_OF_LAST;
429 	}
430 
431 	return 1;
432 }
433 
434 /* Adjust the reported bmap around shared/unshared extent transitions. */
435 STATIC int
436 xfs_getbmap_adjust_shared(
437 	struct xfs_inode		*ip,
438 	int				whichfork,
439 	struct xfs_bmbt_irec		*map,
440 	struct getbmapx			*out,
441 	struct xfs_bmbt_irec		*next_map)
442 {
443 	struct xfs_mount		*mp = ip->i_mount;
444 	xfs_agnumber_t			agno;
445 	xfs_agblock_t			agbno;
446 	xfs_agblock_t			ebno;
447 	xfs_extlen_t			elen;
448 	xfs_extlen_t			nlen;
449 	int				error;
450 
451 	next_map->br_startblock = NULLFSBLOCK;
452 	next_map->br_startoff = NULLFILEOFF;
453 	next_map->br_blockcount = 0;
454 
455 	/* Only written data blocks can be shared. */
456 	if (!xfs_is_reflink_inode(ip) || whichfork != XFS_DATA_FORK ||
457 	    map->br_startblock == DELAYSTARTBLOCK ||
458 	    map->br_startblock == HOLESTARTBLOCK ||
459 	    ISUNWRITTEN(map))
460 		return 0;
461 
462 	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
463 	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
464 	error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
465 			&ebno, &elen, true);
466 	if (error)
467 		return error;
468 
469 	if (ebno == NULLAGBLOCK) {
470 		/* No shared blocks at all. */
471 		return 0;
472 	} else if (agbno == ebno) {
473 		/*
474 		 * Shared extent at (agbno, elen).  Shrink the reported
475 		 * extent length and prepare to move the start of map[i]
476 		 * to agbno+elen, with the aim of (re)formatting the new
477 		 * map[i] the next time through the inner loop.
478 		 */
479 		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
480 		out->bmv_oflags |= BMV_OF_SHARED;
481 		if (elen != map->br_blockcount) {
482 			*next_map = *map;
483 			next_map->br_startblock += elen;
484 			next_map->br_startoff += elen;
485 			next_map->br_blockcount -= elen;
486 		}
487 		map->br_blockcount -= elen;
488 	} else {
489 		/*
490 		 * There's an unshared extent (agbno, ebno - agbno)
491 		 * followed by shared extent at (ebno, elen).  Shrink
492 		 * the reported extent length to cover only the unshared
493 		 * extent and prepare to move up the start of map[i] to
494 		 * ebno, with the aim of (re)formatting the new map[i]
495 		 * the next time through the inner loop.
496 		 */
497 		*next_map = *map;
498 		nlen = ebno - agbno;
499 		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
500 		next_map->br_startblock += nlen;
501 		next_map->br_startoff += nlen;
502 		next_map->br_blockcount -= nlen;
503 		map->br_blockcount -= nlen;
504 	}
505 
506 	return 0;
507 }
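/*
 * Worked example (hypothetical numbers): map[i] starts at agbno 100 and
 * covers 8 blocks, and xfs_reflink_find_shared() reports ebno = 102,
 * elen = 4.  The unshared lead-in (agbno 100, 2 blocks) is reported
 * now, and next_map is primed to (agbno 102, 6 blocks) so the shared
 * part is (re)examined on the next pass through the caller's loop.
 */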
508 
509 /*
510  * Get inode's extents as described in bmv, and format for output.
511  * Calls formatter to fill the user's buffer until all extents
512  * are mapped, until the passed-in bmv->bmv_count slots have
513  * been filled, or until the formatter short-circuits the loop,
514  * if it is tracking filled-in extents on its own.
515  */
516 int						/* error code */
517 xfs_getbmap(
518 	xfs_inode_t		*ip,
519 	struct getbmapx		*bmv,		/* user bmap structure */
520 	xfs_bmap_format_t	formatter,	/* format to user */
521 	void			*arg)		/* formatter arg */
522 {
523 	__int64_t		bmvend;		/* last block requested */
524 	int			error = 0;	/* return value */
525 	__int64_t		fixlen;		/* length for -1 case */
526 	int			i;		/* extent number */
527 	int			lock;		/* lock state */
528 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
529 	xfs_mount_t		*mp;		/* file system mount point */
530 	int			nex;		/* # of user extents can do */
531 	int			subnex;		/* # of bmapi's can do */
532 	int			nmap;		/* number of map entries */
533 	struct getbmapx		*out;		/* output structure */
534 	int			whichfork;	/* data or attr fork */
535 	int			prealloced;	/* this is a file with
536 						 * preallocated data space */
537 	int			iflags;		/* interface flags */
538 	int			bmapi_flags;	/* flags for xfs_bmapi */
539 	int			cur_ext = 0;
540 	struct xfs_bmbt_irec	inject_map;
541 
542 	mp = ip->i_mount;
543 	iflags = bmv->bmv_iflags;
544 
545 #ifndef DEBUG
546 	/* Only allow CoW fork queries if we're debugging. */
547 	if (iflags & BMV_IF_COWFORK)
548 		return -EINVAL;
549 #endif
550 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
551 		return -EINVAL;
552 
553 	if (iflags & BMV_IF_ATTRFORK)
554 		whichfork = XFS_ATTR_FORK;
555 	else if (iflags & BMV_IF_COWFORK)
556 		whichfork = XFS_COW_FORK;
557 	else
558 		whichfork = XFS_DATA_FORK;
559 
560 	switch (whichfork) {
561 	case XFS_ATTR_FORK:
562 		if (XFS_IFORK_Q(ip)) {
563 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
564 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
565 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
566 				return -EINVAL;
567 		} else if (unlikely(
568 			   ip->i_d.di_aformat != 0 &&
569 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
570 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
571 					 ip->i_mount);
572 			return -EFSCORRUPTED;
573 		}
574 
575 		prealloced = 0;
576 		fixlen = 1LL << 32;
577 		break;
578 	case XFS_COW_FORK:
579 		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
580 			return -EINVAL;
581 
582 		if (xfs_get_cowextsz_hint(ip)) {
583 			prealloced = 1;
584 			fixlen = mp->m_super->s_maxbytes;
585 		} else {
586 			prealloced = 0;
587 			fixlen = XFS_ISIZE(ip);
588 		}
589 		break;
590 	default:
591 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
592 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
593 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
594 			return -EINVAL;
595 
596 		if (xfs_get_extsz_hint(ip) ||
597 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
598 			prealloced = 1;
599 			fixlen = mp->m_super->s_maxbytes;
600 		} else {
601 			prealloced = 0;
602 			fixlen = XFS_ISIZE(ip);
603 		}
604 		break;
605 	}
606 
607 	if (bmv->bmv_length == -1) {
608 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
609 		bmv->bmv_length =
610 			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
611 	} else if (bmv->bmv_length == 0) {
612 		bmv->bmv_entries = 0;
613 		return 0;
614 	} else if (bmv->bmv_length < 0) {
615 		return -EINVAL;
616 	}
617 
618 	nex = bmv->bmv_count - 1;
619 	if (nex <= 0)
620 		return -EINVAL;
621 	bmvend = bmv->bmv_offset + bmv->bmv_length;
622 
623 
624 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
625 		return -ENOMEM;
626 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
627 	if (!out)
628 		return -ENOMEM;
629 
630 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
631 	switch (whichfork) {
632 	case XFS_DATA_FORK:
633 		if (!(iflags & BMV_IF_DELALLOC) &&
634 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
635 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
636 			if (error)
637 				goto out_unlock_iolock;
638 
639 			/*
640 			 * Even after flushing the inode, there can still be
641 			 * delalloc blocks on the inode beyond EOF due to
642 			 * speculative preallocation.  These are not removed
643 			 * until the release function is called or the inode
644 			 * is inactivated.  Hence we cannot assert here that
645 			 * ip->i_delayed_blks == 0.
646 			 */
647 		}
648 
649 		lock = xfs_ilock_data_map_shared(ip);
650 		break;
651 	case XFS_COW_FORK:
652 		lock = XFS_ILOCK_SHARED;
653 		xfs_ilock(ip, lock);
654 		break;
655 	case XFS_ATTR_FORK:
656 		lock = xfs_ilock_attr_map_shared(ip);
657 		break;
658 	}
659 
660 	/*
661 	 * Don't let nex be bigger than the number of extents
662 	 * we can have assuming alternating holes and real extents.
663 	 */
664 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
665 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
666 
667 	bmapi_flags = xfs_bmapi_aflag(whichfork);
668 	if (!(iflags & BMV_IF_PREALLOC))
669 		bmapi_flags |= XFS_BMAPI_IGSTATE;
670 
671 	/*
672 	 * Allocate enough space to handle "subnex" maps at a time.
673 	 */
674 	error = -ENOMEM;
675 	subnex = 16;
676 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
677 	if (!map)
678 		goto out_unlock_ilock;
679 
680 	bmv->bmv_entries = 0;
681 
682 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
683 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
684 		error = 0;
685 		goto out_free_map;
686 	}
687 
688 	do {
689 		nmap = (nex > subnex) ? subnex : nex;
690 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
691 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
692 				       map, &nmap, bmapi_flags);
693 		if (error)
694 			goto out_free_map;
695 		ASSERT(nmap <= subnex);
696 
697 		for (i = 0; i < nmap && bmv->bmv_length &&
698 				cur_ext < bmv->bmv_count - 1; i++) {
699 			out[cur_ext].bmv_oflags = 0;
700 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
701 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
702 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
703 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
704 			out[cur_ext].bmv_offset =
705 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
706 			out[cur_ext].bmv_length =
707 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
708 			out[cur_ext].bmv_unused1 = 0;
709 			out[cur_ext].bmv_unused2 = 0;
710 
711 			/*
712 			 * delayed allocation extents that start beyond EOF can
713 			 * occur due to speculative EOF allocation when the
714 			 * delalloc extent is larger than the largest freespace
715 			 * extent at conversion time. These extents cannot be
716 			 * converted by data writeback, so can exist here even
717 			 * if we are not supposed to be finding delalloc
718 			 * extents.
719 			 */
720 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
721 			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
722 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
723 
724 			if (map[i].br_startblock == HOLESTARTBLOCK &&
725 			    whichfork == XFS_ATTR_FORK) {
726 				/* came to the end of attribute fork */
727 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
728 				goto out_free_map;
729 			}
730 
731 			/* Is this a shared block? */
732 			error = xfs_getbmap_adjust_shared(ip, whichfork,
733 					&map[i], &out[cur_ext], &inject_map);
734 			if (error)
735 				goto out_free_map;
736 
737 			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
738 					&out[cur_ext], prealloced, bmvend,
739 					map[i].br_startblock,
740 					inject_map.br_startblock != NULLFSBLOCK))
741 				goto out_free_map;
742 
743 			bmv->bmv_offset =
744 				out[cur_ext].bmv_offset +
745 				out[cur_ext].bmv_length;
746 			bmv->bmv_length =
747 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
748 
749 			/*
750 			 * In case we don't want to return the hole,
751 			 * don't increase cur_ext so that we can reuse
752 			 * it in the next loop.
753 			 */
754 			if ((iflags & BMV_IF_NO_HOLES) &&
755 			    map[i].br_startblock == HOLESTARTBLOCK) {
756 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
757 				continue;
758 			}
759 
760 			/*
761 			 * In order to report shared extents accurately,
762 			 * we report each distinct shared/unshared part
763 			 * of a single bmbt record using multiple bmap
764 			 * extents.  To make that happen, we iterate the
765 			 * same map array item multiple times, each
766 			 * time trimming out the subextent that we just
767 			 * reported.
768 			 *
769 			 * Because of this, we must check the out array
770 			 * index (cur_ext) directly against bmv_count-1
771 			 * to avoid overflows.
772 			 */
773 			if (inject_map.br_startblock != NULLFSBLOCK) {
774 				map[i] = inject_map;
775 				i--;
776 			}
777 			bmv->bmv_entries++;
778 			cur_ext++;
779 		}
780 	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
781 
782  out_free_map:
783 	kmem_free(map);
784  out_unlock_ilock:
785 	xfs_iunlock(ip, lock);
786  out_unlock_iolock:
787 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
788 
789 	for (i = 0; i < cur_ext; i++) {
790 		int full = 0;	/* user array is full */
791 
792 		/* format results & advance arg */
793 		error = formatter(&arg, &out[i], &full);
794 		if (error || full)
795 			break;
796 	}
797 
798 	kmem_free(out);
799 	return error;
800 }
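/*
 * Usage sketch (simplified from the ioctl code): the GETBMAP/GETBMAPX
 * paths call this with a formatter that copies each struct getbmapx to
 * the user buffer, roughly:
 *
 *	error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, &user_ptr);
 *
 * where the formatter does the copy_to_user() and sets *full once the
 * user's array is exhausted.
 */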
801 
802 /*
803  * Dead simple method of punching delayed allocation blocks from a range in
804  * the inode.  Walks a block at a time so it will be slow, but it is only
805  * executed in rare error cases so the overhead is not critical.  This will
806  * always punch out both the start and end blocks, even if the ranges only
807  * partially overlap them, so it is up to the caller to ensure that partial
808  * blocks are not passed in.
809  */
810 int
811 xfs_bmap_punch_delalloc_range(
812 	struct xfs_inode	*ip,
813 	xfs_fileoff_t		start_fsb,
814 	xfs_fileoff_t		length)
815 {
816 	xfs_fileoff_t		remaining = length;
817 	int			error = 0;
818 
819 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
820 
821 	do {
822 		int		done;
823 		xfs_bmbt_irec_t	imap;
824 		int		nimaps = 1;
825 		xfs_fsblock_t	firstblock;
826 		struct xfs_defer_ops dfops;
827 
828 		/*
829 		 * Map the range first and check that it is a delalloc extent
830 		 * before trying to unmap the range. Otherwise we will be
831 		 * trying to remove a real extent (which requires a
832 		 * transaction) or a hole, which is probably a bad idea...
833 		 */
834 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
835 				       XFS_BMAPI_ENTIRE);
836 
837 		if (error) {
838 			/* something screwed, just bail */
839 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
840 				xfs_alert(ip->i_mount,
841 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
842 						ip->i_ino, start_fsb);
843 			}
844 			break;
845 		}
846 		if (!nimaps) {
847 			/* nothing there */
848 			goto next_block;
849 		}
850 		if (imap.br_startblock != DELAYSTARTBLOCK) {
851 			/* been converted, ignore */
852 			goto next_block;
853 		}
854 		WARN_ON(imap.br_blockcount == 0);
855 
856 		/*
857 		 * Note: while we initialise the firstblock/dfops pair, they
858 		 * should never be used because blocks should never be
859 		 * allocated or freed for a delalloc extent and hence we don't
860 		 * need to cancel or finish them after the xfs_bunmapi() call.
861 		 */
862 		xfs_defer_init(&dfops, &firstblock);
863 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
864 					&dfops, &done);
865 		if (error)
866 			break;
867 
868 		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
869 next_block:
870 		start_fsb++;
871 		remaining--;
872 	} while (remaining > 0);
873 
874 	return error;
875 }
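/*
 * Usage sketch (hypothetical caller): tearing down the delalloc
 * reservation backing a range whose writeback failed, with the
 * required lock held:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 *					      end_fsb - start_fsb);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */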
876 
877 /*
878  * Test whether it is appropriate to check an inode for and free post-EOF
879  * blocks. The 'force' parameter determines whether we should also consider
880  * regular files that are marked preallocated or append-only.
881  */
882 bool
883 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
884 {
885 	/* prealloc/delalloc exists only on regular files */
886 	if (!S_ISREG(VFS_I(ip)->i_mode))
887 		return false;
888 
889 	/*
890 	 * Zero sized files with no cached pages and no delalloc blocks will not
891 	 * have speculative prealloc/delalloc blocks to remove.
892 	 */
893 	if (VFS_I(ip)->i_size == 0 &&
894 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
895 	    ip->i_delayed_blks == 0)
896 		return false;
897 
898 	/* If we haven't read in the extent list, then don't do it now. */
899 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
900 		return false;
901 
902 	/*
903 	 * Do not free real preallocated or append-only files unless the file
904 	 * has delalloc blocks and we are forced to remove them.
905 	 */
906 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
907 		if (!force || ip->i_delayed_blks == 0)
908 			return false;
909 
910 	return true;
911 }
912 
913 /*
914  * This is called by xfs_inactive to free any blocks beyond EOF
915  * when the link count isn't zero and by xfs_dm_punch_hole() when
916  * punching a hole to EOF.
917  */
918 int
919 xfs_free_eofblocks(
920 	xfs_mount_t	*mp,
921 	xfs_inode_t	*ip,
922 	bool		need_iolock)
923 {
924 	xfs_trans_t	*tp;
925 	int		error;
926 	xfs_fileoff_t	end_fsb;
927 	xfs_fileoff_t	last_fsb;
928 	xfs_filblks_t	map_len;
929 	int		nimaps;
930 	xfs_bmbt_irec_t	imap;
931 
932 	/*
933 	 * Figure out if there are any blocks beyond the end
934 	 * of the file.  If not, then there is nothing to do.
935 	 */
936 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
937 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
938 	if (last_fsb <= end_fsb)
939 		return 0;
940 	map_len = last_fsb - end_fsb;
941 
942 	nimaps = 1;
943 	xfs_ilock(ip, XFS_ILOCK_SHARED);
944 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
945 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
946 
947 	if (!error && (nimaps != 0) &&
948 	    (imap.br_startblock != HOLESTARTBLOCK ||
949 	     ip->i_delayed_blks)) {
950 		/*
951 		 * Attach the dquots to the inode up front.
952 		 */
953 		error = xfs_qm_dqattach(ip, 0);
954 		if (error)
955 			return error;
956 
957 		/*
958 		 * There are blocks after the end of file.
959 		 * Free them up now by truncating the file to
960 		 * its current size.
961 		 */
962 		if (need_iolock) {
963 			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
964 				return -EAGAIN;
965 		}
966 
967 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
968 				&tp);
969 		if (error) {
970 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
971 			if (need_iolock)
972 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
973 			return error;
974 		}
975 
976 		xfs_ilock(ip, XFS_ILOCK_EXCL);
977 		xfs_trans_ijoin(tp, ip, 0);
978 
979 		/*
980 		 * Do not update the on-disk file size.  If we update the
981 		 * on-disk file size and then the system crashes before the
982 		 * contents of the file are flushed to disk then the files
983 		 * may be full of holes (ie NULL files bug).
984 		 */
985 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
986 					      XFS_ISIZE(ip));
987 		if (error) {
988 			/*
989 			 * If we get an error at this point we simply don't
990 			 * bother truncating the file.
991 			 */
992 			xfs_trans_cancel(tp);
993 		} else {
994 			error = xfs_trans_commit(tp);
995 			if (!error)
996 				xfs_inode_clear_eofblocks_tag(ip);
997 		}
998 
999 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1000 		if (need_iolock)
1001 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1002 	}
1003 	return error;
1004 }
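/*
 * Usage sketch: callers gate this on xfs_can_free_eofblocks(); see
 * xfs_shift_file_space() below, which does
 *
 *	if (xfs_can_free_eofblocks(ip, true)) {
 *		error = xfs_free_eofblocks(mp, ip, false);
 *		...
 *	}
 *
 * passing need_iolock = false because the fallocate path already holds
 * the IOLOCK exclusively.
 */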
1005 
1006 int
1007 xfs_alloc_file_space(
1008 	struct xfs_inode	*ip,
1009 	xfs_off_t		offset,
1010 	xfs_off_t		len,
1011 	int			alloc_type)
1012 {
1013 	xfs_mount_t		*mp = ip->i_mount;
1014 	xfs_off_t		count;
1015 	xfs_filblks_t		allocated_fsb;
1016 	xfs_filblks_t		allocatesize_fsb;
1017 	xfs_extlen_t		extsz, temp;
1018 	xfs_fileoff_t		startoffset_fsb;
1019 	xfs_fsblock_t		firstfsb;
1020 	int			nimaps;
1021 	int			quota_flag;
1022 	int			rt;
1023 	xfs_trans_t		*tp;
1024 	xfs_bmbt_irec_t		imaps[1], *imapp;
1025 	struct xfs_defer_ops	dfops;
1026 	uint			qblocks, resblks, resrtextents;
1027 	int			error;
1028 
1029 	trace_xfs_alloc_file_space(ip);
1030 
1031 	if (XFS_FORCED_SHUTDOWN(mp))
1032 		return -EIO;
1033 
1034 	error = xfs_qm_dqattach(ip, 0);
1035 	if (error)
1036 		return error;
1037 
1038 	if (len <= 0)
1039 		return -EINVAL;
1040 
1041 	rt = XFS_IS_REALTIME_INODE(ip);
1042 	extsz = xfs_get_extsz_hint(ip);
1043 
1044 	count = len;
1045 	imapp = &imaps[0];
1046 	nimaps = 1;
1047 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
1048 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1049 
1050 	/*
1051 	 * Allocate file space until done or until there is an error
1052 	 */
1053 	while (allocatesize_fsb && !error) {
1054 		xfs_fileoff_t	s, e;
1055 
1056 		/*
1057 		 * Determine space reservations for data/realtime.
1058 		 */
1059 		if (unlikely(extsz)) {
1060 			s = startoffset_fsb;
1061 			do_div(s, extsz);
1062 			s *= extsz;
1063 			e = startoffset_fsb + allocatesize_fsb;
1064 			if ((temp = do_mod(startoffset_fsb, extsz)))
1065 				e += temp;
1066 			if ((temp = do_mod(e, extsz)))
1067 				e += extsz - temp;
1068 		} else {
1069 			s = 0;
1070 			e = allocatesize_fsb;
1071 		}
1072 
1073 		/*
1074 		 * The transaction reservation is limited to a 32-bit block
1075 		 * count, hence we need to limit the number of blocks we are
1076 		 * trying to reserve to avoid an overflow. We can't allocate
1077 		 * more than @nimaps extents, and an extent is limited on disk
1078 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1079 		 */
1080 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1081 		if (unlikely(rt)) {
1082 			resrtextents = qblocks = resblks;
1083 			resrtextents /= mp->m_sb.sb_rextsize;
1084 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1085 			quota_flag = XFS_QMOPT_RES_RTBLKS;
1086 		} else {
1087 			resrtextents = 0;
1088 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1089 			quota_flag = XFS_QMOPT_RES_REGBLKS;
1090 		}
1091 
1092 		/*
1093 		 * Allocate and setup the transaction.
1094 		 */
1095 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1096 				resrtextents, 0, &tp);
1097 
1098 		/*
1099 		 * Check for running out of space
1100 		 */
1101 		if (error) {
1102 			/*
1103 			 * Free the transaction structure.
1104 			 */
1105 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1106 			break;
1107 		}
1108 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1109 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1110 						      0, quota_flag);
1111 		if (error)
1112 			goto error1;
1113 
1114 		xfs_trans_ijoin(tp, ip, 0);
1115 
1116 		xfs_defer_init(&dfops, &firstfsb);
1117 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1118 					allocatesize_fsb, alloc_type, &firstfsb,
1119 					resblks, imapp, &nimaps, &dfops);
1120 		if (error)
1121 			goto error0;
1122 
1123 		/*
1124 		 * Complete the transaction
1125 		 */
1126 		error = xfs_defer_finish(&tp, &dfops, NULL);
1127 		if (error)
1128 			goto error0;
1129 
1130 		error = xfs_trans_commit(tp);
1131 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1132 		if (error)
1133 			break;
1134 
1135 		allocated_fsb = imapp->br_blockcount;
1136 
1137 		if (nimaps == 0) {
1138 			error = -ENOSPC;
1139 			break;
1140 		}
1141 
1142 		startoffset_fsb += allocated_fsb;
1143 		allocatesize_fsb -= allocated_fsb;
1144 	}
1145 
1146 	return error;
1147 
1148 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1149 	xfs_defer_cancel(&dfops);
1150 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1151 
1152 error1:	/* Just cancel transaction */
1153 	xfs_trans_cancel(tp);
1154 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1155 	return error;
1156 }
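/*
 * Usage sketch: this backs the preallocation side of fallocate().  A
 * caller that wants unwritten preallocation rather than written blocks
 * passes XFS_BMAPI_PREALLOC as alloc_type, as xfs_zero_file_space()
 * does below:
 *
 *	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
 *				     round_up(offset + len, blksize) -
 *				     round_down(offset, blksize),
 *				     XFS_BMAPI_PREALLOC);
 */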
1157 
1158 static int
1159 xfs_unmap_extent(
1160 	struct xfs_inode	*ip,
1161 	xfs_fileoff_t		startoffset_fsb,
1162 	xfs_filblks_t		len_fsb,
1163 	int			*done)
1164 {
1165 	struct xfs_mount	*mp = ip->i_mount;
1166 	struct xfs_trans	*tp;
1167 	struct xfs_defer_ops	dfops;
1168 	xfs_fsblock_t		firstfsb;
1169 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1170 	int			error;
1171 
1172 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1173 	if (error) {
1174 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1175 		return error;
1176 	}
1177 
1178 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1179 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1180 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1181 	if (error)
1182 		goto out_trans_cancel;
1183 
1184 	xfs_trans_ijoin(tp, ip, 0);
1185 
1186 	xfs_defer_init(&dfops, &firstfsb);
1187 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1188 			&dfops, done);
1189 	if (error)
1190 		goto out_bmap_cancel;
1191 
1192 	error = xfs_defer_finish(&tp, &dfops, ip);
1193 	if (error)
1194 		goto out_bmap_cancel;
1195 
1196 	error = xfs_trans_commit(tp);
1197 out_unlock:
1198 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1199 	return error;
1200 
1201 out_bmap_cancel:
1202 	xfs_defer_cancel(&dfops);
1203 out_trans_cancel:
1204 	xfs_trans_cancel(tp);
1205 	goto out_unlock;
1206 }
1207 
1208 static int
1209 xfs_adjust_extent_unmap_boundaries(
1210 	struct xfs_inode	*ip,
1211 	xfs_fileoff_t		*startoffset_fsb,
1212 	xfs_fileoff_t		*endoffset_fsb)
1213 {
1214 	struct xfs_mount	*mp = ip->i_mount;
1215 	struct xfs_bmbt_irec	imap;
1216 	int			nimap, error;
1217 	xfs_extlen_t		mod = 0;
1218 
1219 	nimap = 1;
1220 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1221 	if (error)
1222 		return error;
1223 
1224 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1225 		xfs_daddr_t	block;
1226 
1227 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1228 		block = imap.br_startblock;
1229 		mod = do_div(block, mp->m_sb.sb_rextsize);
1230 		if (mod)
1231 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1232 	}
1233 
1234 	nimap = 1;
1235 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1236 	if (error)
1237 		return error;
1238 
1239 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1240 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1241 		mod++;
1242 		if (mod && mod != mp->m_sb.sb_rextsize)
1243 			*endoffset_fsb -= mod;
1244 	}
1245 
1246 	return 0;
1247 }
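/*
 * Worked example (hypothetical numbers): with sb_rextsize = 4 and the
 * first mapped extent starting at fsblock 102 (102 % 4 == 2), the start
 * of the unmap range is pushed up by 4 - 2 = 2 blocks so it lands on a
 * realtime extent boundary; the end offset is trimmed back the same
 * way.
 */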
1248 
1249 static int
1250 xfs_flush_unmap_range(
1251 	struct xfs_inode	*ip,
1252 	xfs_off_t		offset,
1253 	xfs_off_t		len)
1254 {
1255 	struct xfs_mount	*mp = ip->i_mount;
1256 	struct inode		*inode = VFS_I(ip);
1257 	xfs_off_t		rounding, start, end;
1258 	int			error;
1259 
1260 	/* wait for the completion of any pending DIOs */
1261 	inode_dio_wait(inode);
1262 
1263 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1264 	start = round_down(offset, rounding);
1265 	end = round_up(offset + len, rounding) - 1;
1266 
1267 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1268 	if (error)
1269 		return error;
1270 	truncate_pagecache_range(inode, start, end);
1271 	return 0;
1272 }
1273 
1274 int
1275 xfs_free_file_space(
1276 	struct xfs_inode	*ip,
1277 	xfs_off_t		offset,
1278 	xfs_off_t		len)
1279 {
1280 	struct xfs_mount	*mp = ip->i_mount;
1281 	xfs_fileoff_t		startoffset_fsb;
1282 	xfs_fileoff_t		endoffset_fsb;
1283 	int			done = 0, error;
1284 
1285 	trace_xfs_free_file_space(ip);
1286 
1287 	error = xfs_qm_dqattach(ip, 0);
1288 	if (error)
1289 		return error;
1290 
1291 	if (len <= 0)	/* if nothing being freed */
1292 		return 0;
1293 
1294 	error = xfs_flush_unmap_range(ip, offset, len);
1295 	if (error)
1296 		return error;
1297 
1298 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1299 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1300 
1301 	/*
1302 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1303 	 * and we can't use unwritten extents then we actually need to zero the
1304 	 * whole extent, otherwise we just need to take care of the block
1305 	 * boundaries, and xfs_bunmapi will handle the rest.
1306 	 */
1307 	if (XFS_IS_REALTIME_INODE(ip) &&
1308 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1309 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1310 				&endoffset_fsb);
1311 		if (error)
1312 			return error;
1313 	}
1314 
1315 	if (endoffset_fsb > startoffset_fsb) {
1316 		while (!done) {
1317 			error = xfs_unmap_extent(ip, startoffset_fsb,
1318 					endoffset_fsb - startoffset_fsb, &done);
1319 			if (error)
1320 				return error;
1321 		}
1322 	}
1323 
1324 	/*
1325 	 * Now that we've unmapped all full blocks we'll have to zero out any
1326 	 * partial block at the beginning and/or end.  xfs_zero_range is
1327 	 * smart enough to skip any holes, including those we just created.
1328 	 */
1329 	return xfs_zero_range(ip, offset, len, NULL);
1330 }
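/*
 * Worked example (hypothetical numbers): punching 10k at offset 3k on a
 * filesystem with 4k blocks unmaps the two fully covered blocks
 * (fsblocks 1 and 2) via xfs_unmap_extent(), then xfs_zero_range()
 * zeroes the sub-block head [3k,4k) and tail [12k,13k).
 */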
1331 
1332 /*
1333  * Preallocate and zero a range of a file. This mechanism has the allocation
1334  * semantics of fallocate and in addition converts data in the range to zeroes.
1335  */
1336 int
1337 xfs_zero_file_space(
1338 	struct xfs_inode	*ip,
1339 	xfs_off_t		offset,
1340 	xfs_off_t		len)
1341 {
1342 	struct xfs_mount	*mp = ip->i_mount;
1343 	uint			blksize;
1344 	int			error;
1345 
1346 	trace_xfs_zero_file_space(ip);
1347 
1348 	blksize = 1 << mp->m_sb.sb_blocklog;
1349 
1350 	/*
1351 	 * Punch a hole and prealloc the range. We use hole punch rather than
1352 	 * unwritten extent conversion for two reasons:
1353 	 *
1354 	 * 1.) Hole punch handles partial block zeroing for us.
1355 	 *
1356 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1357 	 * by virtue of the hole punch.
1358 	 */
1359 	error = xfs_free_file_space(ip, offset, len);
1360 	if (error)
1361 		goto out;
1362 
1363 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1364 				     round_up(offset + len, blksize) -
1365 				     round_down(offset, blksize),
1366 				     XFS_BMAPI_PREALLOC);
1367 out:
1368 	return error;
1369 
1370 }
1371 
1372 /*
1373  * @next_fsb will keep track of the extent currently undergoing shift.
1374  * @stop_fsb will keep track of the extent at which we have to stop.
1375  * If we are shifting left, we will start with block (offset + len) and
1376  * shift each extent till last extent.
1377  * If we are shifting right, we will start with last extent inside file space
1378  * and continue until we reach the block corresponding to offset.
1379  */
1380 static int
1381 xfs_shift_file_space(
1382 	struct xfs_inode        *ip,
1383 	xfs_off_t               offset,
1384 	xfs_off_t               len,
1385 	enum shift_direction	direction)
1386 {
1387 	int			done = 0;
1388 	struct xfs_mount	*mp = ip->i_mount;
1389 	struct xfs_trans	*tp;
1390 	int			error;
1391 	struct xfs_defer_ops	dfops;
1392 	xfs_fsblock_t		first_block;
1393 	xfs_fileoff_t		stop_fsb;
1394 	xfs_fileoff_t		next_fsb;
1395 	xfs_fileoff_t		shift_fsb;
1396 
1397 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1398 
1399 	if (direction == SHIFT_LEFT) {
1400 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1401 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1402 	} else {
1403 		/*
1404 		 * If right shift, delegate the work of initializing next_fsb
1405 		 * to xfs_bmap_shift_extents(), which runs with the ilock held.
1406 		 */
1407 		next_fsb = NULLFSBLOCK;
1408 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1409 	}
1410 
1411 	shift_fsb = XFS_B_TO_FSB(mp, len);
1412 
1413 	/*
1414 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1415 	 * into the accessible region of the file.
1416 	 */
1417 	if (xfs_can_free_eofblocks(ip, true)) {
1418 		error = xfs_free_eofblocks(mp, ip, false);
1419 		if (error)
1420 			return error;
1421 	}
1422 
1423 	/*
1424 	 * Writeback and invalidate cache for the remainder of the file as we're
1425 	 * about to shift down every extent from offset to EOF.
1426 	 */
1427 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1428 					     offset, -1);
1429 	if (error)
1430 		return error;
1431 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1432 					offset >> PAGE_SHIFT, -1);
1433 	if (error)
1434 		return error;
1435 
1436 	/*
1437 	 * The extent shifting code works on extent granularity. So, if
1438 	 * stop_fsb is not the starting block of an extent, we need to split
1439 	 * the extent at stop_fsb.
1440 	 */
1441 	if (direction == SHIFT_RIGHT) {
1442 		error = xfs_bmap_split_extent(ip, stop_fsb);
1443 		if (error)
1444 			return error;
1445 	}
1446 
1447 	while (!error && !done) {
1448 		/*
1449 		 * We need to reserve a permanent block count for the
1450 		 * transaction: after shifting an extent into a hole, adjacent
1451 		 * extents may turn out to be mergeable, and merging them can
1452 		 * free a bmbt block during the record update.
1453 		 */
1454 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1455 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1456 		if (error)
1457 			break;
1458 
1459 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1460 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1461 				ip->i_gdquot, ip->i_pdquot,
1462 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
1463 				XFS_QMOPT_RES_REGBLKS);
1464 		if (error)
1465 			goto out_trans_cancel;
1466 
1467 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1468 
1469 		xfs_defer_init(&dfops, &first_block);
1470 
1471 		/*
1472 		 * We are using the write transaction, in which at most two
1473 		 * bmbt updates are allowed per operation.
1474 		 */
1475 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1476 				&done, stop_fsb, &first_block, &dfops,
1477 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1478 		if (error)
1479 			goto out_bmap_cancel;
1480 
1481 		error = xfs_defer_finish(&tp, &dfops, NULL);
1482 		if (error)
1483 			goto out_bmap_cancel;
1484 
1485 		error = xfs_trans_commit(tp);
1486 	}
1487 
1488 	return error;
1489 
1490 out_bmap_cancel:
1491 	xfs_defer_cancel(&dfops);
1492 out_trans_cancel:
1493 	xfs_trans_cancel(tp);
1494 	return error;
1495 }
1496 
1497 /*
1498  * xfs_collapse_file_space()
1499  *	This routine frees disk space and shifts extents for the given file.
1500  *	The first thing we do is free the data blocks in the specified range
1501  *	by calling xfs_free_file_space(), which also syncs dirty data and
1502  *	invalidates the page cache over the region on which the collapse
1503  *	range is working. We then shift extent records left to cover the hole.
1504  * RETURNS:
1505  *	0 on success
1506  *	errno on error
1507  *
1508  */
1509 int
1510 xfs_collapse_file_space(
1511 	struct xfs_inode	*ip,
1512 	xfs_off_t		offset,
1513 	xfs_off_t		len)
1514 {
1515 	int error;
1516 
1517 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1518 	trace_xfs_collapse_file_space(ip);
1519 
1520 	error = xfs_free_file_space(ip, offset, len);
1521 	if (error)
1522 		return error;
1523 
1524 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1525 }
1526 
1527 /*
1528  * xfs_insert_file_space()
1529  *	This routine creates hole space by shifting extents for the given file.
1530  *	The first thing we do is sync dirty data and invalidate the page cache
1531  *	over the region on which the insert range is working. We then split an
1532  *	extent in two at the given offset by calling xfs_bmap_split_extent,
1533  *	and shift all extent records lying between [offset, last allocated
1534  *	extent] to the right to make room for the hole.
1535  * RETURNS:
1536  *	0 on success
1537  *	errno on error
1538  */
1539 int
1540 xfs_insert_file_space(
1541 	struct xfs_inode	*ip,
1542 	loff_t			offset,
1543 	loff_t			len)
1544 {
1545 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1546 	trace_xfs_insert_file_space(ip);
1547 
1548 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1549 }
1550 
1551 /*
1552  * We need to check that the format of the data fork in the temporary inode is
1553  * valid for the target inode before doing the swap. This is not a problem with
1554  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1555  * data fork depending on the space the attribute fork is taking so we can get
1556  * invalid formats on the target inode.
1557  *
1558  * E.g. target has space for 7 extents in extent format, temp inode only has
1559  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1560  * btree, but when swapped it needs to be in extent format. Hence we can't just
1561  * blindly swap data forks on attr2 filesystems.
1562  *
1563  * Note that we check the swap in both directions so that we don't end up with
1564  * a corrupt temporary inode, either.
1565  *
1566  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1567  * inode will prevent this situation from occurring, so all we do here is
1568  * reject and log the attempt. Basically we are putting the responsibility on
1569  * userspace to get this right.
1570  */
1571 static int
1572 xfs_swap_extents_check_format(
1573 	struct xfs_inode	*ip,	/* target inode */
1574 	struct xfs_inode	*tip)	/* tmp inode */
1575 {
1576 
1577 	/* Should never get a local format */
1578 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1579 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1580 		return -EINVAL;
1581 
1582 	/*
1583 	 * If the target inode has fewer extents than the temporary inode then
1584 	 * why did userspace call us?
1585 	 */
1586 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1587 		return -EINVAL;
1588 
1589 	/*
1590 	 * If we have to use the (expensive) rmap swap method, we can
1591 	 * handle any number of extents and any format.
1592 	 */
1593 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1594 		return 0;
1595 
1596 	/*
1597 	 * If the target inode is in extent form and the temp inode is in btree
1598 	 * form then we will end up with the target inode in the wrong format
1599 	 * as we already know there are fewer extents in the temp inode.
1600 	 */
1601 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1602 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1603 		return -EINVAL;
1604 
1605 	/* Check temp in extent form to max in target */
1606 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1607 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1608 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1609 		return -EINVAL;
1610 
1611 	/* Check target in extent form to max in temp */
1612 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1613 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1614 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1615 		return -EINVAL;
1616 
1617 	/*
1618 	 * If we are in a btree format, check that the temp root block will fit
1619 	 * in the target and that it has enough extents to be in btree format
1620 	 * in the target.
1621 	 *
1622 	 * Note that we have to be careful to allow btree->extent conversions
1623 	 * (a common defrag case) which will occur when the temp inode is in
1624 	 * extent format...
1625 	 */
1626 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1627 		if (XFS_IFORK_BOFF(ip) &&
1628 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1629 			return -EINVAL;
1630 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1631 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1632 			return -EINVAL;
1633 	}
1634 
1635 	/* Reciprocal target->temp btree format checks */
1636 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1637 		if (XFS_IFORK_BOFF(tip) &&
1638 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1639 			return -EINVAL;
1640 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1641 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1642 			return -EINVAL;
1643 	}
1644 
1645 	return 0;
1646 }
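/*
 * Worked example of the btree -> extent hazard above (hypothetical
 * geometry): if the target's data fork can hold at most 7 in-core
 * extents before needing btree format, a temp inode in btree format
 * holding only 6 extents would have to land in the target as an
 * undersized btree, so the swap is rejected with -EINVAL.
 */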
1647 
1648 static int
1649 xfs_swap_extent_flush(
1650 	struct xfs_inode	*ip)
1651 {
1652 	int	error;
1653 
1654 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1655 	if (error)
1656 		return error;
1657 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1658 
1659 	/* Verify O_DIRECT for ftmp */
1660 	if (VFS_I(ip)->i_mapping->nrpages)
1661 		return -EINVAL;
1662 	return 0;
1663 }
1664 
1665 /*
1666  * Move extents from one file to another, when rmap is enabled.
1667  */
1668 STATIC int
1669 xfs_swap_extent_rmap(
1670 	struct xfs_trans		**tpp,
1671 	struct xfs_inode		*ip,
1672 	struct xfs_inode		*tip)
1673 {
1674 	struct xfs_bmbt_irec		irec;
1675 	struct xfs_bmbt_irec		uirec;
1676 	struct xfs_bmbt_irec		tirec;
1677 	xfs_fileoff_t			offset_fsb;
1678 	xfs_fileoff_t			end_fsb;
1679 	xfs_filblks_t			count_fsb;
1680 	xfs_fsblock_t			firstfsb;
1681 	struct xfs_defer_ops		dfops;
1682 	int				error;
1683 	xfs_filblks_t			ilen;
1684 	xfs_filblks_t			rlen;
1685 	int				nimaps;
1686 	__uint64_t			tip_flags2;
1687 
1688 	/*
1689 	 * If the source file has shared blocks, we must flag the donor
1690 	 * file as having shared blocks so that we get the shared-block
1691 	 * rmap functions when we go to fix up the rmaps.  The flags
1692 	 * will be switched for real later.
1693 	 */
1694 	tip_flags2 = tip->i_d.di_flags2;
1695 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1696 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1697 
1698 	offset_fsb = 0;
1699 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1700 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1701 
1702 	while (count_fsb) {
1703 		/* Read extent from the donor file */
1704 		nimaps = 1;
1705 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1706 				&nimaps, 0);
1707 		if (error)
1708 			goto out;
1709 		ASSERT(nimaps == 1);
1710 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1711 
1712 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1713 		ilen = tirec.br_blockcount;
1714 
1715 		/* Unmap the old blocks in the source file. */
1716 		while (tirec.br_blockcount) {
1717 			xfs_defer_init(&dfops, &firstfsb);
1718 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1719 
1720 			/* Read extent from the source file */
1721 			nimaps = 1;
1722 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1723 					tirec.br_blockcount, &irec,
1724 					&nimaps, 0);
1725 			if (error)
1726 				goto out_defer;
1727 			ASSERT(nimaps == 1);
1728 			ASSERT(tirec.br_startoff == irec.br_startoff);
1729 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1730 
1731 			/* Trim the extent. */
1732 			uirec = tirec;
1733 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1734 					tirec.br_blockcount,
1735 					irec.br_blockcount);
1736 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1737 
1738 			/* Remove the mapping from the donor file. */
1739 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1740 					tip, &uirec);
1741 			if (error)
1742 				goto out_defer;
1743 
1744 			/* Remove the mapping from the source file. */
1745 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1746 					ip, &irec);
1747 			if (error)
1748 				goto out_defer;
1749 
1750 			/* Map the donor file's blocks into the source file. */
1751 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1752 					ip, &uirec);
1753 			if (error)
1754 				goto out_defer;
1755 
1756 			/* Map the source file's blocks into the donor file. */
1757 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1758 					tip, &irec);
1759 			if (error)
1760 				goto out_defer;
1761 
1762 			error = xfs_defer_finish(tpp, &dfops, ip);
1763 			if (error)
1764 				goto out_defer;
1765 
1766 			tirec.br_startoff += rlen;
1767 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1768 			    tirec.br_startblock != DELAYSTARTBLOCK)
1769 				tirec.br_startblock += rlen;
1770 			tirec.br_blockcount -= rlen;
1771 		}
1772 
1773 		/* Roll on... */
1774 		count_fsb -= ilen;
1775 		offset_fsb += ilen;
1776 	}
1777 
1778 	tip->i_d.di_flags2 = tip_flags2;
1779 	return 0;
1780 
1781 out_defer:
1782 	xfs_defer_cancel(&dfops);
1783 out:
1784 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1785 	tip->i_d.di_flags2 = tip_flags2;
1786 	return error;
1787 }
1788 
1789 /* Swap the extents of two files by swapping data forks. */
1790 STATIC int
1791 xfs_swap_extent_forks(
1792 	struct xfs_trans	*tp,
1793 	struct xfs_inode	*ip,
1794 	struct xfs_inode	*tip,
1795 	int			*src_log_flags,
1796 	int			*target_log_flags)
1797 {
1798 	struct xfs_ifork	tempifp, *ifp, *tifp;
1799 	int			aforkblks = 0;
1800 	int			taforkblks = 0;
1801 	xfs_extnum_t		nextents;
1802 	__uint64_t		tmp;
1803 	int			error;
1804 
1805 	/*
1806 	 * Count the number of extended attribute blocks
1807 	 */
1808 	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1809 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1810 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
1811 				&aforkblks);
1812 		if (error)
1813 			return error;
1814 	}
1815 	if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
1816 	    tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1817 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1818 				&taforkblks);
1819 		if (error)
1820 			return error;
1821 	}
1822 
1823 	/*
1824 	 * Before we've swapped the forks, let's set the owners of the forks
1825 	 * appropriately. We have to do this as we are demand paging the btree
1826 	 * buffers, and so the validation done on read will expect the owner
1827 	 * field to be correctly set. Once we change the owners, we can swap the
1828 	 * inode forks.
1829 	 */
1830 	if (ip->i_d.di_version == 3 &&
1831 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1832 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1833 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1834 					      tip->i_ino, NULL);
1835 		if (error)
1836 			return error;
1837 	}
1838 
1839 	if (tip->i_d.di_version == 3 &&
1840 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1841 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1842 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1843 					      ip->i_ino, NULL);
1844 		if (error)
1845 			return error;
1846 	}
1847 
1848 	/*
1849 	 * Swap the data forks of the inodes
1850 	 */
1851 	ifp = &ip->i_df;
1852 	tifp = &tip->i_df;
1853 	tempifp = *ifp;		/* struct copy */
1854 	*ifp = *tifp;		/* struct copy */
1855 	*tifp = tempifp;	/* struct copy */
1856 
1857 	/*
1858 	 * Fix the on-disk inode values
1859 	 */
1860 	tmp = (__uint64_t)ip->i_d.di_nblocks;
1861 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1862 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1863 
1864 	tmp = (__uint64_t) ip->i_d.di_nextents;
1865 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1866 	tip->i_d.di_nextents = tmp;
1867 
1868 	tmp = (__uint64_t) ip->i_d.di_format;
1869 	ip->i_d.di_format = tip->i_d.di_format;
1870 	tip->i_d.di_format = tmp;
1871 
1872 	/*
1873 	 * The extents in the source inode could still contain speculative
1874 	 * preallocation beyond EOF (e.g. the file is open but not modified
1875 	 * while defrag is in progress). In that case, we need to copy over the
1876 	 * number of delalloc blocks the data fork in the source inode is
1877 	 * tracking beyond EOF so that when the fork is truncated away when the
1878 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1879 	 * counter on that inode.
1880 	 */
1881 	ASSERT(tip->i_delayed_blks == 0);
1882 	tip->i_delayed_blks = ip->i_delayed_blks;
1883 	ip->i_delayed_blks = 0;
1884 
1885 	switch (ip->i_d.di_format) {
1886 	case XFS_DINODE_FMT_EXTENTS:
1887 		/*
1888 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1889 		 * it's already NULL or pointing to the extent list.
1890 		 */
1891 		nextents = xfs_iext_count(&ip->i_df);
1892 		if (nextents <= XFS_INLINE_EXTS)
1893 			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
1894 		(*src_log_flags) |= XFS_ILOG_DEXT;
1895 		break;
1896 	case XFS_DINODE_FMT_BTREE:
1897 		ASSERT(ip->i_d.di_version < 3 ||
1898 		       (*src_log_flags & XFS_ILOG_DOWNER));
1899 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1900 		break;
1901 	}
1902 
1903 	switch (tip->i_d.di_format) {
1904 	case XFS_DINODE_FMT_EXTENTS:
1905 		/*
1906 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1907 		 * it's already NULL or pointing to the extent list.
1908 		 */
1909 		nextents = xfs_iext_count(&tip->i_df);
1910 		if (nextents <= XFS_INLINE_EXTS)
1911 			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
1912 		(*target_log_flags) |= XFS_ILOG_DEXT;
1913 		break;
1914 	case XFS_DINODE_FMT_BTREE:
1915 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1916 		ASSERT(tip->i_d.di_version < 3 ||
1917 		       (*target_log_flags & XFS_ILOG_DOWNER));
1918 		break;
1919 	}
1920 
1921 	return 0;
1922 }
1923 
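/*
 * Userspace reaches this function via the XFS_IOC_SWAPEXT ioctl, the way
 * xfs_fsr does when defragmenting a file.  A minimal, illustrative sketch
 * of the caller side ("fd" and "tmpfd" here are hypothetical descriptors
 * for the target and donor files on the same filesystem):
 *
 *	struct xfs_swapext	sx = { 0 };
 *
 *	sx.sx_version = XFS_SX_VERSION;
 *	sx.sx_fdtarget = fd;
 *	sx.sx_fdtmp = tmpfd;
 *	sx.sx_offset = 0;
 *	sx.sx_length = target_size;	- must cover the whole file
 *	sx.sx_stat = <bulkstat data for the target>, whose ctime/mtime
 *		     let us detect concurrent modification below
 *
 *	ioctl(fd, XFS_IOC_SWAPEXT, &sx);
 */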
1924 int
1925 xfs_swap_extents(
1926 	struct xfs_inode	*ip,	/* target inode */
1927 	struct xfs_inode	*tip,	/* tmp inode */
1928 	struct xfs_swapext	*sxp)
1929 {
1930 	struct xfs_mount	*mp = ip->i_mount;
1931 	struct xfs_trans	*tp;
1932 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1933 	int			src_log_flags, target_log_flags;
1934 	int			error = 0;
1935 	int			lock_flags;
1936 	struct xfs_ifork	*cowfp;
1937 	__uint64_t		f;
1938 	int			resblks;
1939 
1940 	/*
1941 	 * Lock the inodes against other IO, page faults and truncate to
1942 	 * begin with.  Then we can ensure the inodes are flushed and have no
1943 	 * page cache safely. Once we have done this we can take the ilocks and
1944 	 * do the rest of the checks.
1945 	 */
1946 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1947 	lock_flags = XFS_MMAPLOCK_EXCL;
1948 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1949 
1950 	/* Verify that both files have the same format */
1951 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1952 		error = -EINVAL;
1953 		goto out_unlock;
1954 	}
1955 
1956 	/* Verify both files are either real-time or non-realtime */
1957 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1958 		error = -EINVAL;
1959 		goto out_unlock;
1960 	}
1961 
1962 	error = xfs_swap_extent_flush(ip);
1963 	if (error)
1964 		goto out_unlock;
1965 	error = xfs_swap_extent_flush(tip);
1966 	if (error)
1967 		goto out_unlock;
1968 
1969 	/*
1970 	 * Extent "swapping" with rmap requires a permanent reservation and
1971 	 * a block reservation because it's really just a remap operation
1972 	 * performed with log redo items!
1973 	 */
1974 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1975 		/*
1976 		 * Conceptually this shouldn't affect the shape of either
1977 		 * bmbt, but since we atomically move extents one by one,
1978 		 * we reserve enough space to rebuild both trees.
1979 		 */
1980 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
1981 				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
1982 				XFS_DATA_FORK) +
1983 			  XFS_SWAP_RMAP_SPACE_RES(mp,
1984 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
1985 				XFS_DATA_FORK);
1986 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1987 				0, 0, &tp);
1988 	} else
1989 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
1990 				0, 0, &tp);
1991 	if (error)
1992 		goto out_unlock;
1993 
1994 	/*
1995 	 * Lock and join the inodes to the transaction so that transaction commit
1996 	 * or cancel will unlock the inodes from this point onwards.
1997 	 */
1998 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1999 	lock_flags |= XFS_ILOCK_EXCL;
2000 	xfs_trans_ijoin(tp, ip, 0);
2001 	xfs_trans_ijoin(tp, tip, 0);
2002 
2004 	/* Verify all data are being swapped */
2005 	if (sxp->sx_offset != 0 ||
2006 	    sxp->sx_length != ip->i_d.di_size ||
2007 	    sxp->sx_length != tip->i_d.di_size) {
2008 		error = -EFAULT;
2009 		goto out_trans_cancel;
2010 	}
2011 
2012 	trace_xfs_swap_extent_before(ip, 0);
2013 	trace_xfs_swap_extent_before(tip, 1);
2014 
2015 	/* check inode formats now that data is flushed */
2016 	error = xfs_swap_extents_check_format(ip, tip);
2017 	if (error) {
2018 		xfs_notice(mp,
2019 		    "%s: inode 0x%llx format is incompatible for exchanging.",
2020 				__func__, ip->i_ino);
2021 		goto out_trans_cancel;
2022 	}
2023 
2024 	/*
2025 	 * Compare the current change and modify times with those
2026 	 * passed in.  If they differ, we abort this swap.
2027 	 * This is the mechanism used to assure the calling
2028 	 * process that the file was not changed out from
2029 	 * under it.
2030 	 */
2031 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2032 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2033 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2034 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
2035 		error = -EBUSY;
2036 		goto out_trans_cancel;
2037 	}
2038 
2039 	/*
2040 	 * Note the trickiness in setting the log flags - we set the owner log
2041 	 * flag on the opposite inode (i.e. the inode we are setting the new
2042 	 * owner to be) because once we swap the forks and log that, log
2043 	 * recovery is going to see the fork as owned by the swapped inode,
2044 	 * not the pre-swap owner.
2045 	 */
2046 	src_log_flags = XFS_ILOG_CORE;
2047 	target_log_flags = XFS_ILOG_CORE;
2048 
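	/*
	 * With rmap enabled we must exchange the two data forks one mapping
	 * at a time with deferred log items; otherwise we can swap the fork
	 * contents wholesale.
	 */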
2049 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2050 		error = xfs_swap_extent_rmap(&tp, ip, tip);
2051 	else
2052 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2053 				&target_log_flags);
2054 	if (error)
2055 		goto out_trans_cancel;
2056 
2057 	/* Do we have to swap reflink flags? */
2058 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2059 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
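		/*
		 * The test above means exactly one of the two inodes has the
		 * reflink flag set, so move the flag and the CoW fork over
		 * to the other inode.
		 */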
2060 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2061 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2062 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2063 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2064 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2065 		cowfp = ip->i_cowfp;
2066 		ip->i_cowfp = tip->i_cowfp;
2067 		tip->i_cowfp = cowfp;
2068 		xfs_inode_set_cowblocks_tag(ip);
2069 		xfs_inode_set_cowblocks_tag(tip);
2070 	}
2071 
2072 	xfs_trans_log_inode(tp, ip,  src_log_flags);
2073 	xfs_trans_log_inode(tp, tip, target_log_flags);
2074 
2075 	/*
2076 	 * If this is a synchronous mount, make sure that the
2077 	 * transaction goes to disk before returning to the user.
2078 	 */
2079 	if (mp->m_flags & XFS_MOUNT_WSYNC)
2080 		xfs_trans_set_sync(tp);
2081 
2082 	error = xfs_trans_commit(tp);
2083 
2084 	trace_xfs_swap_extent_after(ip, 0);
2085 	trace_xfs_swap_extent_after(tip, 1);
2086 
2087 out_unlock:
2088 	xfs_iunlock(ip, lock_flags);
2089 	xfs_iunlock(tip, lock_flags);
2090 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2091 	return error;
2092 
2093 out_trans_cancel:
2094 	xfs_trans_cancel(tp);
2095 	goto out_unlock;
2096 }
2097