xref: /linux/fs/xfs/xfs_file.c (revision 164666fa66669d437bdcc8d5f1744a2aee73be41)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trans.h"
15 #include "xfs_inode_item.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_util.h"
18 #include "xfs_dir2.h"
19 #include "xfs_dir2_priv.h"
20 #include "xfs_ioctl.h"
21 #include "xfs_trace.h"
22 #include "xfs_log.h"
23 #include "xfs_icache.h"
24 #include "xfs_pnfs.h"
25 #include "xfs_iomap.h"
26 #include "xfs_reflink.h"
27 
28 #include <linux/falloc.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mman.h>
31 #include <linux/fadvise.h>
32 #include <linux/mount.h>
33 
34 static const struct vm_operations_struct xfs_file_vm_ops;
35 
36 /*
37  * Decide if the given file range is aligned to the size of the fundamental
38  * allocation unit for the file.
39  */
40 static bool
41 xfs_is_falloc_aligned(
42 	struct xfs_inode	*ip,
43 	loff_t			pos,
44 	long long		len)
45 {
46 	struct xfs_mount	*mp = ip->i_mount;
47 	uint64_t		mask;
48 
49 	if (XFS_IS_REALTIME_INODE(ip)) {
50 		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
51 			u64	rextbytes;
52 			u32	mod;
53 
54 			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
55 			div_u64_rem(pos, rextbytes, &mod);
56 			if (mod)
57 				return false;
58 			div_u64_rem(len, rextbytes, &mod);
59 			return mod == 0;
60 		}
61 		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
62 	} else {
63 		mask = mp->m_sb.sb_blocksize - 1;
64 	}
65 
66 	return !((pos | len) & mask);
67 }
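
/*
 * Worked example (illustrative only, not part of the original source):
 * with a 4096-byte allocation unit, the power-of-two fast path above is a
 * single mask test over both the offset and the length:
 *
 *	u64 mask = 4096 - 1;
 *	// pos = 8192, len = 12288: ((8192 | 12288) & mask) == 0 -> aligned
 *	// pos = 8192, len = 6000:  ((8192 | 6000) & mask) != 0 -> rejected
 */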
68 
69 int
70 xfs_update_prealloc_flags(
71 	struct xfs_inode	*ip,
72 	enum xfs_prealloc_flags	flags)
73 {
74 	struct xfs_trans	*tp;
75 	int			error;
76 
77 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
78 			0, 0, 0, &tp);
79 	if (error)
80 		return error;
81 
82 	xfs_ilock(ip, XFS_ILOCK_EXCL);
83 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
84 
85 	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
86 		VFS_I(ip)->i_mode &= ~S_ISUID;
87 		if (VFS_I(ip)->i_mode & S_IXGRP)
88 			VFS_I(ip)->i_mode &= ~S_ISGID;
89 		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
90 	}
91 
92 	if (flags & XFS_PREALLOC_SET)
93 		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
94 	if (flags & XFS_PREALLOC_CLEAR)
95 		ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;
96 
97 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
98 	if (flags & XFS_PREALLOC_SYNC)
99 		xfs_trans_set_sync(tp);
100 	return xfs_trans_commit(tp);
101 }
102 
103 /*
104  * Fsync operations on directories are much simpler than on regular files,
105  * as there is no file data to flush, and thus also no need for explicit
106  * cache flush operations, and there are no non-transaction metadata updates
107  * on directories either.
108  */
109 STATIC int
110 xfs_dir_fsync(
111 	struct file		*file,
112 	loff_t			start,
113 	loff_t			end,
114 	int			datasync)
115 {
116 	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
117 
118 	trace_xfs_dir_fsync(ip);
119 	return xfs_log_force_inode(ip);
120 }
121 
122 static xfs_csn_t
123 xfs_fsync_seq(
124 	struct xfs_inode	*ip,
125 	bool			datasync)
126 {
127 	if (!xfs_ipincount(ip))
128 		return 0;
129 	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
130 		return 0;
131 	return ip->i_itemp->ili_commit_seq;
132 }
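
/*
 * Worked example (illustrative): for fdatasync, a timestamp-only change
 * leaves ili_fsync_fields == XFS_ILOG_TIMESTAMP, so the masked check above
 * is zero and no log force is needed:
 *
 *	// datasync && !(XFS_ILOG_TIMESTAMP & ~XFS_ILOG_TIMESTAMP) -> return 0
 */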
133 
134 /*
135  * All metadata updates are logged, which means that we just have to flush the
136  * log up to the latest LSN that touched the inode.
137  *
138  * If we have concurrent fsync/fdatasync() calls, we need them to all block on
139  * the log force before we clear the ili_fsync_fields field. This ensures that
140  * we don't get a racing sync operation that does not wait for the metadata to
141  * hit the journal before returning.  If we race with clearing ili_fsync_fields,
142  * then all that will happen is the log force will do nothing as the lsn will
143  * already be on disk.  We can't race with setting ili_fsync_fields because that
144  * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
145  * shared until after the ili_fsync_fields is cleared.
146  */
147 static int
148 xfs_fsync_flush_log(
149 	struct xfs_inode	*ip,
150 	bool			datasync,
151 	int			*log_flushed)
152 {
153 	int			error = 0;
154 	xfs_csn_t		seq;
155 
156 	xfs_ilock(ip, XFS_ILOCK_SHARED);
157 	seq = xfs_fsync_seq(ip, datasync);
158 	if (seq) {
159 		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
160 					  log_flushed);
161 
162 		spin_lock(&ip->i_itemp->ili_lock);
163 		ip->i_itemp->ili_fsync_fields = 0;
164 		spin_unlock(&ip->i_itemp->ili_lock);
165 	}
166 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
167 	return error;
168 }
169 
170 STATIC int
171 xfs_file_fsync(
172 	struct file		*file,
173 	loff_t			start,
174 	loff_t			end,
175 	int			datasync)
176 {
177 	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
178 	struct xfs_mount	*mp = ip->i_mount;
179 	int			error = 0;
180 	int			log_flushed = 0;
181 
182 	trace_xfs_file_fsync(ip);
183 
184 	error = file_write_and_wait_range(file, start, end);
185 	if (error)
186 		return error;
187 
188 	if (xfs_is_shutdown(mp))
189 		return -EIO;
190 
191 	xfs_iflags_clear(ip, XFS_ITRUNCATED);
192 
193 	/*
194 	 * If we have an RT and/or log subvolume we need to make sure to flush
195 	 * the write cache of the device used for file data first.  This is to
196 	 * ensure newly written file data makes it to disk before logging the new
197 	 * inode size in case of an extending write.
198 	 */
199 	if (XFS_IS_REALTIME_INODE(ip))
200 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
201 	else if (mp->m_logdev_targp != mp->m_ddev_targp)
202 		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
203 
204 	/*
205 	 * Any inode that has dirty modifications in the log is pinned.  The
206 	 * racy check here for a pinned inode will not catch modifications
207 	 * that happen concurrently to the fsync call, but fsync semantics
208 	 * only require us to sync previously completed I/O.
209 	 */
210 	if (xfs_ipincount(ip))
211 		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);
212 
213 	/*
214 	 * If we only have a single device, and the log force above was
215 	 * a no-op we might have to flush the data device cache here.
216 	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
217 	 * an already allocated file and thus do not have any metadata to
218 	 * commit.
219 	 */
220 	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
221 	    mp->m_logdev_targp == mp->m_ddev_targp)
222 		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
223 
224 	return error;
225 }
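
/*
 * Userspace view (an illustrative sketch, not code from this file): an
 * fdatasync(2) call reaches the function above with datasync == 1, so a
 * pure overwrite of already-allocated blocks can skip the log force and
 * only pays for the data writeback and device cache flushes:
 *
 *	pwrite(fd, buf, 4096, 0);	// overwrite, no size/metadata change
 *	fdatasync(fd);			// data flush, usually no log force
 */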
226 
227 static int
228 xfs_ilock_iocb(
229 	struct kiocb		*iocb,
230 	unsigned int		lock_mode)
231 {
232 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
233 
234 	if (iocb->ki_flags & IOCB_NOWAIT) {
235 		if (!xfs_ilock_nowait(ip, lock_mode))
236 			return -EAGAIN;
237 	} else {
238 		xfs_ilock(ip, lock_mode);
239 	}
240 
241 	return 0;
242 }
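
/*
 * Userspace view (illustrative): IOCB_NOWAIT is set for preadv2(2) and
 * pwritev2(2) with RWF_NOWAIT, so the helper above must trylock and push
 * -EAGAIN back to the caller instead of sleeping:
 *
 *	ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// retry from a context that is allowed to block
 */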
243 
244 STATIC ssize_t
245 xfs_file_dio_read(
246 	struct kiocb		*iocb,
247 	struct iov_iter		*to)
248 {
249 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
250 	ssize_t			ret;
251 
252 	trace_xfs_file_direct_read(iocb, to);
253 
254 	if (!iov_iter_count(to))
255 		return 0; /* skip atime */
256 
257 	file_accessed(iocb->ki_filp);
258 
259 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
260 	if (ret)
261 		return ret;
262 	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, 0);
263 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
264 
265 	return ret;
266 }
267 
268 static noinline ssize_t
269 xfs_file_dax_read(
270 	struct kiocb		*iocb,
271 	struct iov_iter		*to)
272 {
273 	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
274 	ssize_t			ret = 0;
275 
276 	trace_xfs_file_dax_read(iocb, to);
277 
278 	if (!iov_iter_count(to))
279 		return 0; /* skip atime */
280 
281 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
282 	if (ret)
283 		return ret;
284 	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
285 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
286 
287 	file_accessed(iocb->ki_filp);
288 	return ret;
289 }
290 
291 STATIC ssize_t
292 xfs_file_buffered_read(
293 	struct kiocb		*iocb,
294 	struct iov_iter		*to)
295 {
296 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
297 	ssize_t			ret;
298 
299 	trace_xfs_file_buffered_read(iocb, to);
300 
301 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
302 	if (ret)
303 		return ret;
304 	ret = generic_file_read_iter(iocb, to);
305 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
306 
307 	return ret;
308 }
309 
310 STATIC ssize_t
311 xfs_file_read_iter(
312 	struct kiocb		*iocb,
313 	struct iov_iter		*to)
314 {
315 	struct inode		*inode = file_inode(iocb->ki_filp);
316 	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
317 	ssize_t			ret = 0;
318 
319 	XFS_STATS_INC(mp, xs_read_calls);
320 
321 	if (xfs_is_shutdown(mp))
322 		return -EIO;
323 
324 	if (IS_DAX(inode))
325 		ret = xfs_file_dax_read(iocb, to);
326 	else if (iocb->ki_flags & IOCB_DIRECT)
327 		ret = xfs_file_dio_read(iocb, to);
328 	else
329 		ret = xfs_file_buffered_read(iocb, to);
330 
331 	if (ret > 0)
332 		XFS_STATS_ADD(mp, xs_read_bytes, ret);
333 	return ret;
334 }
335 
336 /*
337  * Common pre-write limit and setup checks.
338  *
339  * Called with the iolock held either shared or exclusive according to
340  * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
341  * if called for a direct write beyond i_size.
342  */
343 STATIC ssize_t
344 xfs_file_write_checks(
345 	struct kiocb		*iocb,
346 	struct iov_iter		*from,
347 	int			*iolock)
348 {
349 	struct file		*file = iocb->ki_filp;
350 	struct inode		*inode = file->f_mapping->host;
351 	struct xfs_inode	*ip = XFS_I(inode);
352 	ssize_t			error = 0;
353 	size_t			count = iov_iter_count(from);
354 	bool			drained_dio = false;
355 	loff_t			isize;
356 
357 restart:
358 	error = generic_write_checks(iocb, from);
359 	if (error <= 0)
360 		return error;
361 
362 	if (iocb->ki_flags & IOCB_NOWAIT) {
363 		error = break_layout(inode, false);
364 		if (error == -EWOULDBLOCK)
365 			error = -EAGAIN;
366 	} else {
367 		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
368 	}
369 
370 	if (error)
371 		return error;
372 
373 	/*
374 	 * For changing security info in file_remove_privs() we need i_rwsem
375 	 * exclusively.
376 	 */
377 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
378 		xfs_iunlock(ip, *iolock);
379 		*iolock = XFS_IOLOCK_EXCL;
380 		error = xfs_ilock_iocb(iocb, *iolock);
381 		if (error) {
382 			*iolock = 0;
383 			return error;
384 		}
385 		goto restart;
386 	}
387 
388 	/*
389 	 * If the offset is beyond the size of the file, we need to zero any
390 	 * blocks that fall between the existing EOF and the start of this
391 	 * write.  If zeroing is needed and we are currently holding the iolock
392  * shared, we need to upgrade it to exclusive, which implies having to
393  * redo all of the checks above.
394 	 *
395 	 * We need to serialise against EOF updates that occur in IO completions
396 	 * here. We want to make sure that nobody is changing the size while we
397 	 * do this check until we have placed an IO barrier (i.e.  hold the
398 	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
399 	 * spinlock effectively forms a memory barrier once we have the
400 	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
401 	 * hence be able to correctly determine if we need to run zeroing.
402 	 *
403 	 * We can do an unlocked check here safely as IO completion can only
404 	 * extend EOF. Truncate is locked out at this point, so the EOF can
405 	 * not move backwards, only forwards. Hence we only need to take the
406 	 * slow path and spin locks when we are at or beyond the current EOF.
407 	 */
408 	if (iocb->ki_pos <= i_size_read(inode))
409 		goto out;
410 
411 	spin_lock(&ip->i_flags_lock);
412 	isize = i_size_read(inode);
413 	if (iocb->ki_pos > isize) {
414 		spin_unlock(&ip->i_flags_lock);
415 
416 		if (iocb->ki_flags & IOCB_NOWAIT)
417 			return -EAGAIN;
418 
419 		if (!drained_dio) {
420 			if (*iolock == XFS_IOLOCK_SHARED) {
421 				xfs_iunlock(ip, *iolock);
422 				*iolock = XFS_IOLOCK_EXCL;
423 				xfs_ilock(ip, *iolock);
424 				iov_iter_reexpand(from, count);
425 			}
426 			/*
427 			 * We now have an IO submission barrier in place, but
428 			 * AIO can do EOF updates during IO completion and hence
429 			 * we now need to wait for all of them to drain. Non-AIO
430 			 * DIO will have drained before we are given the
431 			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
432 			 * no-op.
433 			 */
434 			inode_dio_wait(inode);
435 			drained_dio = true;
436 			goto restart;
437 		}
438 
439 		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
440 		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
441 		if (error)
442 			return error;
443 	} else
444 		spin_unlock(&ip->i_flags_lock);
445 
446 out:
447 	return file_modified(file);
448 }
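
/*
 * Worked example (illustrative) for the zeroing above: with an in-core
 * i_size of 1000 bytes, a write at ki_pos == 3000 must not expose stale
 * disk contents in [1000, 3000), so the gap is zeroed first:
 *
 *	// isize = 1000, ki_pos = 3000
 *	// xfs_zero_range(ip, 1000, 2000, NULL) zeroes [1000, 3000)
 */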
449 
450 static int
451 xfs_dio_write_end_io(
452 	struct kiocb		*iocb,
453 	ssize_t			size,
454 	int			error,
455 	unsigned		flags)
456 {
457 	struct inode		*inode = file_inode(iocb->ki_filp);
458 	struct xfs_inode	*ip = XFS_I(inode);
459 	loff_t			offset = iocb->ki_pos;
460 	unsigned int		nofs_flag;
461 
462 	trace_xfs_end_io_direct_write(ip, offset, size);
463 
464 	if (xfs_is_shutdown(ip->i_mount))
465 		return -EIO;
466 
467 	if (error)
468 		return error;
469 	if (!size)
470 		return 0;
471 
472 	/*
473 	 * Capture amount written on completion as we can't reliably account
474 	 * for it on submission.
475 	 */
476 	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
477 
478 	/*
479 	 * We can allocate memory here while doing writeback on behalf of
480 	 * memory reclaim.  To avoid memory allocation deadlocks set the
481 	 * task-wide nofs context for the following operations.
482 	 */
483 	nofs_flag = memalloc_nofs_save();
484 
485 	if (flags & IOMAP_DIO_COW) {
486 		error = xfs_reflink_end_cow(ip, offset, size);
487 		if (error)
488 			goto out;
489 	}
490 
491 	/*
492 	 * Unwritten conversion updates the in-core isize after extent
493 	 * conversion but before updating the on-disk size. Updating isize any
494 	 * earlier allows a racing dio read to find unwritten extents before
495 	 * they are converted.
496 	 */
497 	if (flags & IOMAP_DIO_UNWRITTEN) {
498 		error = xfs_iomap_write_unwritten(ip, offset, size, true);
499 		goto out;
500 	}
501 
502 	/*
503 	 * We need to update the in-core inode size here so that we don't end up
504 	 * with the on-disk inode size being outside the in-core inode size. We
505 	 * have no other method of updating EOF for AIO, so always do it here
506 	 * if necessary.
507 	 *
508 	 * We need to lock the test/set EOF update as we can be racing with
509 	 * other IO completions here to update the EOF. Failing to serialise
510 	 * here can result in EOF moving backwards and Bad Things Happen when
511 	 * that occurs.
512 	 *
513 	 * As IO completion only ever extends EOF, we can do an unlocked check
514 	 * here to avoid taking the spinlock. If we land within the current EOF,
515 	 * then we do not need to do an extending update at all, and we don't
516 	 * need to take the lock to check this. If we race with an update moving
517 	 * EOF, then we'll either still be beyond EOF and need to take the lock,
518 	 * or we'll be within EOF and we don't need to take it at all.
519 	 */
520 	if (offset + size <= i_size_read(inode))
521 		goto out;
522 
523 	spin_lock(&ip->i_flags_lock);
524 	if (offset + size > i_size_read(inode)) {
525 		i_size_write(inode, offset + size);
526 		spin_unlock(&ip->i_flags_lock);
527 		error = xfs_setfilesize(ip, offset, size);
528 	} else {
529 		spin_unlock(&ip->i_flags_lock);
530 	}
531 
532 out:
533 	memalloc_nofs_restore(nofs_flag);
534 	return error;
535 }
536 
537 static const struct iomap_dio_ops xfs_dio_write_ops = {
538 	.end_io		= xfs_dio_write_end_io,
539 };
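
/*
 * Worked example (illustrative) for the EOF update in the completion
 * handler above: with an in-core i_size of 6000, a completing DIO for
 * [4096, 8192) takes i_flags_lock and extends i_size to 8192, while a
 * racing completion for [0, 4096) sees offset + size <= i_size and takes
 * the unlocked early exit without touching the lock at all.
 */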
540 
541 /*
542  * Handle block aligned direct I/O writes
543  */
544 static noinline ssize_t
545 xfs_file_dio_write_aligned(
546 	struct xfs_inode	*ip,
547 	struct kiocb		*iocb,
548 	struct iov_iter		*from)
549 {
550 	int			iolock = XFS_IOLOCK_SHARED;
551 	ssize_t			ret;
552 
553 	ret = xfs_ilock_iocb(iocb, iolock);
554 	if (ret)
555 		return ret;
556 	ret = xfs_file_write_checks(iocb, from, &iolock);
557 	if (ret)
558 		goto out_unlock;
559 
560 	/*
561 	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
562 	 * the iolock back to shared if we had to take the exclusive lock in
563 	 * xfs_file_write_checks() for other reasons.
564 	 */
565 	if (iolock == XFS_IOLOCK_EXCL) {
566 		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
567 		iolock = XFS_IOLOCK_SHARED;
568 	}
569 	trace_xfs_file_direct_write(iocb, from);
570 	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
571 			   &xfs_dio_write_ops, 0, 0);
572 out_unlock:
573 	if (iolock)
574 		xfs_iunlock(ip, iolock);
575 	return ret;
576 }
577 
578 /*
579  * Handle block unaligned direct I/O writes
580  *
581  * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
582  * them to be done in parallel with reads and other direct I/O writes.  However,
583  * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
584  * to do sub-block zeroing and that requires serialisation against other direct
585  * I/O to the same block.  In this case we need to serialise the submission of
586  * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
587  * In the case where sub-block zeroing is not required, we can do concurrent
588  * sub-block dios to the same block successfully.
589  *
590  * Optimistically submit the I/O using the shared lock first, but use the
591  * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
592  * if block allocation or partial block zeroing would be required.  In that case
593  * we try again with the exclusive lock.
594  */
595 static noinline ssize_t
596 xfs_file_dio_write_unaligned(
597 	struct xfs_inode	*ip,
598 	struct kiocb		*iocb,
599 	struct iov_iter		*from)
600 {
601 	size_t			isize = i_size_read(VFS_I(ip));
602 	size_t			count = iov_iter_count(from);
603 	int			iolock = XFS_IOLOCK_SHARED;
604 	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
605 	ssize_t			ret;
606 
607 	/*
608 	 * Extending writes need exclusivity because of the sub-block zeroing
609 	 * that the DIO code always does for partial tail blocks beyond EOF, so
610 	 * don't even bother trying the fast path in this case.
611 	 */
612 	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
613 retry_exclusive:
614 		if (iocb->ki_flags & IOCB_NOWAIT)
615 			return -EAGAIN;
616 		iolock = XFS_IOLOCK_EXCL;
617 		flags = IOMAP_DIO_FORCE_WAIT;
618 	}
619 
620 	ret = xfs_ilock_iocb(iocb, iolock);
621 	if (ret)
622 		return ret;
623 
624 	/*
625 	 * We can't properly handle unaligned direct I/O to reflink files yet,
626 	 * as we can't unshare a partial block.
627 	 */
628 	if (xfs_is_cow_inode(ip)) {
629 		trace_xfs_reflink_bounce_dio_write(iocb, from);
630 		ret = -ENOTBLK;
631 		goto out_unlock;
632 	}
633 
634 	ret = xfs_file_write_checks(iocb, from, &iolock);
635 	if (ret)
636 		goto out_unlock;
637 
638 	/*
639 	 * If we are doing exclusive unaligned I/O, this must be the only I/O
640 	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
641 	 * conversions from the AIO end_io handler.  Wait for all other I/O to
642 	 * drain first.
643 	 */
644 	if (flags & IOMAP_DIO_FORCE_WAIT)
645 		inode_dio_wait(VFS_I(ip));
646 
647 	trace_xfs_file_direct_write(iocb, from);
648 	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
649 			   &xfs_dio_write_ops, flags, 0);
650 
651 	/*
652 	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
653 	 * layer rejected it for mapping or locking reasons. If we are doing
654 	 * nonblocking user I/O, propagate the error.
655 	 */
656 	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
657 		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
658 		xfs_iunlock(ip, iolock);
659 		goto retry_exclusive;
660 	}
661 
662 out_unlock:
663 	if (iolock)
664 		xfs_iunlock(ip, iolock);
665 	return ret;
666 }
667 
668 static ssize_t
669 xfs_file_dio_write(
670 	struct kiocb		*iocb,
671 	struct iov_iter		*from)
672 {
673 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
674 	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
675 	size_t			count = iov_iter_count(from);
676 
677 	/* direct I/O must be aligned to device logical sector size */
678 	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
679 		return -EINVAL;
680 	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
681 		return xfs_file_dio_write_unaligned(ip, iocb, from);
682 	return xfs_file_dio_write_aligned(ip, iocb, from);
683 }
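
/*
 * Worked example (illustrative): with 512-byte logical sectors and
 * 4096-byte filesystem blocks, the dispatch above behaves as follows:
 *
 *	// (4096 | 8192) & 4095 == 0 -> xfs_file_dio_write_aligned()
 *	// (4096 | 2048) & 4095 != 0 -> xfs_file_dio_write_unaligned()
 *	// (4096 | 100)  & 511  != 0 -> -EINVAL (sector unaligned)
 */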
684 
685 static noinline ssize_t
686 xfs_file_dax_write(
687 	struct kiocb		*iocb,
688 	struct iov_iter		*from)
689 {
690 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
691 	struct xfs_inode	*ip = XFS_I(inode);
692 	int			iolock = XFS_IOLOCK_EXCL;
693 	ssize_t			ret, error = 0;
694 	loff_t			pos;
695 
696 	ret = xfs_ilock_iocb(iocb, iolock);
697 	if (ret)
698 		return ret;
699 	ret = xfs_file_write_checks(iocb, from, &iolock);
700 	if (ret)
701 		goto out;
702 
703 	pos = iocb->ki_pos;
704 
705 	trace_xfs_file_dax_write(iocb, from);
706 	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
707 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
708 		i_size_write(inode, iocb->ki_pos);
709 		error = xfs_setfilesize(ip, pos, ret);
710 	}
711 out:
712 	if (iolock)
713 		xfs_iunlock(ip, iolock);
714 	if (error)
715 		return error;
716 
717 	if (ret > 0) {
718 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
719 
720 		/* Handle various SYNC-type writes */
721 		ret = generic_write_sync(iocb, ret);
722 	}
723 	return ret;
724 }
725 
726 STATIC ssize_t
727 xfs_file_buffered_write(
728 	struct kiocb		*iocb,
729 	struct iov_iter		*from)
730 {
731 	struct file		*file = iocb->ki_filp;
732 	struct address_space	*mapping = file->f_mapping;
733 	struct inode		*inode = mapping->host;
734 	struct xfs_inode	*ip = XFS_I(inode);
735 	ssize_t			ret;
736 	bool			cleared_space = false;
737 	int			iolock;
738 
739 	if (iocb->ki_flags & IOCB_NOWAIT)
740 		return -EOPNOTSUPP;
741 
742 write_retry:
743 	iolock = XFS_IOLOCK_EXCL;
744 	xfs_ilock(ip, iolock);
745 
746 	ret = xfs_file_write_checks(iocb, from, &iolock);
747 	if (ret)
748 		goto out;
749 
750 	/* We can write back this queue in page reclaim */
751 	current->backing_dev_info = inode_to_bdi(inode);
752 
753 	trace_xfs_file_buffered_write(iocb, from);
754 	ret = iomap_file_buffered_write(iocb, from,
755 			&xfs_buffered_write_iomap_ops);
756 	if (likely(ret >= 0))
757 		iocb->ki_pos += ret;
758 
759 	/*
760 	 * If we hit a space limit, try to free up some lingering preallocated
761 	 * space before returning an error. In the case of ENOSPC, first try to
762 	 * write back all dirty inodes to free up some of the excess reserved
763 	 * metadata space. This reduces the chances that the eofblocks scan
764 	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
765 	 * also behaves as a filter to prevent too many eofblocks scans from
766 	 * running at the same time.  Use a synchronous scan to increase the
767 	 * effectiveness of the scan.
768 	 */
769 	if (ret == -EDQUOT && !cleared_space) {
770 		xfs_iunlock(ip, iolock);
771 		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
772 		cleared_space = true;
773 		goto write_retry;
774 	} else if (ret == -ENOSPC && !cleared_space) {
775 		struct xfs_icwalk	icw = {0};
776 
777 		cleared_space = true;
778 		xfs_flush_inodes(ip->i_mount);
779 
780 		xfs_iunlock(ip, iolock);
781 		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
782 		xfs_blockgc_free_space(ip->i_mount, &icw);
783 		goto write_retry;
784 	}
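	/*
	 * Note: cleared_space limits this to a single retry; if the scans
	 * above do not free enough space, the second -EDQUOT/-ENOSPC is
	 * returned to the caller rather than looping here forever.
	 */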
785 
786 	current->backing_dev_info = NULL;
787 out:
788 	if (iolock)
789 		xfs_iunlock(ip, iolock);
790 
791 	if (ret > 0) {
792 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
793 		/* Handle various SYNC-type writes */
794 		ret = generic_write_sync(iocb, ret);
795 	}
796 	return ret;
797 }
798 
799 STATIC ssize_t
800 xfs_file_write_iter(
801 	struct kiocb		*iocb,
802 	struct iov_iter		*from)
803 {
804 	struct file		*file = iocb->ki_filp;
805 	struct address_space	*mapping = file->f_mapping;
806 	struct inode		*inode = mapping->host;
807 	struct xfs_inode	*ip = XFS_I(inode);
808 	ssize_t			ret;
809 	size_t			ocount = iov_iter_count(from);
810 
811 	XFS_STATS_INC(ip->i_mount, xs_write_calls);
812 
813 	if (ocount == 0)
814 		return 0;
815 
816 	if (xfs_is_shutdown(ip->i_mount))
817 		return -EIO;
818 
819 	if (IS_DAX(inode))
820 		return xfs_file_dax_write(iocb, from);
821 
822 	if (iocb->ki_flags & IOCB_DIRECT) {
823 		/*
824 		 * Allow a directio write to fall back to a buffered
825 		 * write *only* in the case that we're doing a reflink
826 		 * CoW.  In all other directio scenarios we do not
827 		 * allow an operation to fall back to buffered mode.
828 		 */
829 		ret = xfs_file_dio_write(iocb, from);
830 		if (ret != -ENOTBLK)
831 			return ret;
832 	}
833 
834 	return xfs_file_buffered_write(iocb, from);
835 }
836 
837 static void
838 xfs_wait_dax_page(
839 	struct inode		*inode)
840 {
841 	struct xfs_inode        *ip = XFS_I(inode);
842 
843 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
844 	schedule();
845 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
846 }
847 
848 static int
849 xfs_break_dax_layouts(
850 	struct inode		*inode,
851 	bool			*retry)
852 {
853 	struct page		*page;
854 
855 	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
856 
857 	page = dax_layout_busy_page(inode->i_mapping);
858 	if (!page)
859 		return 0;
860 
861 	*retry = true;
862 	return ___wait_var_event(&page->_refcount,
863 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
864 			0, 0, xfs_wait_dax_page(inode));
865 }
866 
867 int
868 xfs_break_layouts(
869 	struct inode		*inode,
870 	uint			*iolock,
871 	enum layout_break_reason reason)
872 {
873 	bool			retry;
874 	int			error;
875 
876 	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
877 
878 	do {
879 		retry = false;
880 		switch (reason) {
881 		case BREAK_UNMAP:
882 			error = xfs_break_dax_layouts(inode, &retry);
883 			if (error || retry)
884 				break;
885 			fallthrough;
886 		case BREAK_WRITE:
887 			error = xfs_break_leased_layouts(inode, iolock, &retry);
888 			break;
889 		default:
890 			WARN_ON_ONCE(1);
891 			error = -EINVAL;
892 		}
893 	} while (error == 0 && retry);
894 
895 	return error;
896 }
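
/*
 * For reference: in this file BREAK_WRITE is used by the write path (see
 * xfs_file_write_checks() above), which only needs leases broken, while
 * BREAK_UNMAP is used by xfs_file_fallocate() below, which must also wait
 * out busy DAX pages via xfs_break_dax_layouts().
 */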
897 
898 #define	XFS_FALLOC_FL_SUPPORTED						\
899 		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
900 		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
901 		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
902 
903 STATIC long
904 xfs_file_fallocate(
905 	struct file		*file,
906 	int			mode,
907 	loff_t			offset,
908 	loff_t			len)
909 {
910 	struct inode		*inode = file_inode(file);
911 	struct xfs_inode	*ip = XFS_I(inode);
912 	long			error;
913 	enum xfs_prealloc_flags	flags = 0;
914 	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
915 	loff_t			new_size = 0;
916 	bool			do_file_insert = false;
917 
918 	if (!S_ISREG(inode->i_mode))
919 		return -EINVAL;
920 	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
921 		return -EOPNOTSUPP;
922 
923 	xfs_ilock(ip, iolock);
924 	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
925 	if (error)
926 		goto out_unlock;
927 
928 	/*
929 	 * Must wait for all AIO to complete before we continue as AIO can
930 	 * change the file size on completion without holding any locks we
931 	 * currently hold. We must do this first because AIO can update both
932 	 * the on disk and in memory inode sizes, and the operations that follow
933 	 * require the in-memory size to be fully up-to-date.
934 	 */
935 	inode_dio_wait(inode);
936 
937 	/*
938 	 * Now that AIO and DIO have drained, we flush and (if necessary) invalidate
939 	 * the cached range over the first operation we are about to run.
940 	 *
941 	 * We care about zero and collapse here because they both run a hole
942 	 * punch over the range first. Because that can zero data, and the range
943 	 * of invalidation for the shift operations is much larger, we still do
944 	 * the required flush for collapse in xfs_prepare_shift().
945 	 *
946 	 * Insert has the same range requirements as collapse, and we extend the
947 	 * file first which can zero data. Hence insert has the same
948 	 * flush/invalidate requirements as collapse and so they are both
949 	 * handled at the right time by xfs_prepare_shift().
950 	 */
951 	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
952 		    FALLOC_FL_COLLAPSE_RANGE)) {
953 		error = xfs_flush_unmap_range(ip, offset, len);
954 		if (error)
955 			goto out_unlock;
956 	}
957 
958 	if (mode & FALLOC_FL_PUNCH_HOLE) {
959 		error = xfs_free_file_space(ip, offset, len);
960 		if (error)
961 			goto out_unlock;
962 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
963 		if (!xfs_is_falloc_aligned(ip, offset, len)) {
964 			error = -EINVAL;
965 			goto out_unlock;
966 		}
967 
968 		/*
969 		 * There is no need for a collapse range to overlap EOF; in
970 		 * that case it is effectively a truncate operation.
971 		 */
972 		if (offset + len >= i_size_read(inode)) {
973 			error = -EINVAL;
974 			goto out_unlock;
975 		}
976 
977 		new_size = i_size_read(inode) - len;
978 
979 		error = xfs_collapse_file_space(ip, offset, len);
980 		if (error)
981 			goto out_unlock;
982 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
983 		loff_t		isize = i_size_read(inode);
984 
985 		if (!xfs_is_falloc_aligned(ip, offset, len)) {
986 			error = -EINVAL;
987 			goto out_unlock;
988 		}
989 
990 		/*
991 		 * New inode size must not exceed ->s_maxbytes, accounting for
992 		 * possible signed overflow.
993 		 */
994 		if (inode->i_sb->s_maxbytes - isize < len) {
995 			error = -EFBIG;
996 			goto out_unlock;
997 		}
998 		new_size = isize + len;
999 
1000 		/* Offset should be less than i_size */
1001 		if (offset >= isize) {
1002 			error = -EINVAL;
1003 			goto out_unlock;
1004 		}
1005 		do_file_insert = true;
1006 	} else {
1007 		flags |= XFS_PREALLOC_SET;
1008 
1009 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1010 		    offset + len > i_size_read(inode)) {
1011 			new_size = offset + len;
1012 			error = inode_newsize_ok(inode, new_size);
1013 			if (error)
1014 				goto out_unlock;
1015 		}
1016 
1017 		if (mode & FALLOC_FL_ZERO_RANGE) {
1018 			/*
1019 			 * Punch a hole and prealloc the range.  We use a hole
1020 			 * punch rather than unwritten extent conversion for two
1021 			 * reasons:
1022 			 *
1023 			 *   1.) Hole punch handles partial block zeroing for us.
1024 			 *   2.) If prealloc returns ENOSPC, the file range is
1025 			 *       still zero-valued by virtue of the hole punch.
1026 			 */
1027 			unsigned int blksize = i_blocksize(inode);
1028 
1029 			trace_xfs_zero_file_space(ip);
1030 
1031 			error = xfs_free_file_space(ip, offset, len);
1032 			if (error)
1033 				goto out_unlock;
1034 
1035 			len = round_up(offset + len, blksize) -
1036 			      round_down(offset, blksize);
1037 			offset = round_down(offset, blksize);
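			/*
			 * e.g. blksize 4096, offset 1000, len 3000: the hole
			 * punch above zeroed [1000, 4000) and the rounding
			 * widens the upcoming preallocation to [0, 4096).
			 */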
1038 		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
1039 			error = xfs_reflink_unshare(ip, offset, len);
1040 			if (error)
1041 				goto out_unlock;
1042 		} else {
1043 			/*
1044 			 * In always_cow mode we can't use preallocations and
1045 			 * thus should not create them.
1046 			 */
1047 			if (xfs_is_always_cow_inode(ip)) {
1048 				error = -EOPNOTSUPP;
1049 				goto out_unlock;
1050 			}
1051 		}
1052 
1053 		if (!xfs_is_always_cow_inode(ip)) {
1054 			error = xfs_alloc_file_space(ip, offset, len);
1055 			if (error)
1056 				goto out_unlock;
1057 		}
1058 	}
1059 
1060 	if (file->f_flags & O_DSYNC)
1061 		flags |= XFS_PREALLOC_SYNC;
1062 
1063 	error = xfs_update_prealloc_flags(ip, flags);
1064 	if (error)
1065 		goto out_unlock;
1066 
1067 	/* Change file size if needed */
1068 	if (new_size) {
1069 		struct iattr iattr;
1070 
1071 		iattr.ia_valid = ATTR_SIZE;
1072 		iattr.ia_size = new_size;
1073 		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
1074 					    file_dentry(file), &iattr);
1075 		if (error)
1076 			goto out_unlock;
1077 	}
1078 
1079 	/*
1080 	 * Perform hole insertion now that the file size has been
1081 	 * updated so that if we crash during the operation we don't
1082 	 * leave shifted extents past EOF and hence lose access to
1083 	 * the data that is contained within them.
1084 	 */
1085 	if (do_file_insert)
1086 		error = xfs_insert_file_space(ip, offset, len);
1087 
1088 out_unlock:
1089 	xfs_iunlock(ip, iolock);
1090 	return error;
1091 }
1092 
1093 STATIC int
1094 xfs_file_fadvise(
1095 	struct file	*file,
1096 	loff_t		start,
1097 	loff_t		end,
1098 	int		advice)
1099 {
1100 	struct xfs_inode *ip = XFS_I(file_inode(file));
1101 	int ret;
1102 	int lockflags = 0;
1103 
1104 	/*
1105 	 * Operations creating pages in page cache need protection from hole
1106 	 * punching and similar ops
1107 	 */
1108 	if (advice == POSIX_FADV_WILLNEED) {
1109 		lockflags = XFS_IOLOCK_SHARED;
1110 		xfs_ilock(ip, lockflags);
1111 	}
1112 	ret = generic_fadvise(file, start, end, advice);
1113 	if (lockflags)
1114 		xfs_iunlock(ip, lockflags);
1115 	return ret;
1116 }
1117 
1118 /* Does this file, inode, or mount want synchronous writes? */
1119 static inline bool xfs_file_sync_writes(struct file *filp)
1120 {
1121 	struct xfs_inode	*ip = XFS_I(file_inode(filp));
1122 
1123 	if (xfs_has_wsync(ip->i_mount))
1124 		return true;
1125 	if (filp->f_flags & (__O_SYNC | O_DSYNC))
1126 		return true;
1127 	if (IS_SYNC(file_inode(filp)))
1128 		return true;
1129 
1130 	return false;
1131 }
1132 
1133 STATIC loff_t
1134 xfs_file_remap_range(
1135 	struct file		*file_in,
1136 	loff_t			pos_in,
1137 	struct file		*file_out,
1138 	loff_t			pos_out,
1139 	loff_t			len,
1140 	unsigned int		remap_flags)
1141 {
1142 	struct inode		*inode_in = file_inode(file_in);
1143 	struct xfs_inode	*src = XFS_I(inode_in);
1144 	struct inode		*inode_out = file_inode(file_out);
1145 	struct xfs_inode	*dest = XFS_I(inode_out);
1146 	struct xfs_mount	*mp = src->i_mount;
1147 	loff_t			remapped = 0;
1148 	xfs_extlen_t		cowextsize;
1149 	int			ret;
1150 
1151 	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1152 		return -EINVAL;
1153 
1154 	if (!xfs_has_reflink(mp))
1155 		return -EOPNOTSUPP;
1156 
1157 	if (xfs_is_shutdown(mp))
1158 		return -EIO;
1159 
1160 	/* Prepare and then clone file data. */
1161 	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1162 			&len, remap_flags);
1163 	if (ret || len == 0)
1164 		return ret;
1165 
1166 	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1167 
1168 	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1169 			&remapped);
1170 	if (ret)
1171 		goto out_unlock;
1172 
1173 	/*
1174 	 * Carry the cowextsize hint from src to dest if we're sharing the
1175 	 * entire source file to the entire destination file, the source file
1176 	 * has a cowextsize hint, and the destination file does not.
1177 	 */
1178 	cowextsize = 0;
1179 	if (pos_in == 0 && len == i_size_read(inode_in) &&
1180 	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1181 	    pos_out == 0 && len >= i_size_read(inode_out) &&
1182 	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1183 		cowextsize = src->i_cowextsize;
1184 
1185 	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1186 			remap_flags);
1187 	if (ret)
1188 		goto out_unlock;
1189 
1190 	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1191 		xfs_log_force_inode(dest);
1192 out_unlock:
1193 	xfs_iunlock2_io_mmap(src, dest);
1194 	if (ret)
1195 		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1196 	return remapped > 0 ? remapped : ret;
1197 }
1198 
1199 STATIC int
1200 xfs_file_open(
1201 	struct inode	*inode,
1202 	struct file	*file)
1203 {
1204 	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1205 		return -EFBIG;
1206 	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1207 		return -EIO;
1208 	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
1209 	return 0;
1210 }
1211 
1212 STATIC int
1213 xfs_dir_open(
1214 	struct inode	*inode,
1215 	struct file	*file)
1216 {
1217 	struct xfs_inode *ip = XFS_I(inode);
1218 	int		mode;
1219 	int		error;
1220 
1221 	error = xfs_file_open(inode, file);
1222 	if (error)
1223 		return error;
1224 
1225 	/*
1226 	 * If there are any blocks, read-ahead block 0 as we're almost
1227 	 * certain to have the next operation be a read there.
1228 	 */
1229 	mode = xfs_ilock_data_map_shared(ip);
1230 	if (ip->i_df.if_nextents > 0)
1231 		error = xfs_dir3_data_readahead(ip, 0, 0);
1232 	xfs_iunlock(ip, mode);
1233 	return error;
1234 }
1235 
1236 STATIC int
1237 xfs_file_release(
1238 	struct inode	*inode,
1239 	struct file	*filp)
1240 {
1241 	return xfs_release(XFS_I(inode));
1242 }
1243 
1244 STATIC int
1245 xfs_file_readdir(
1246 	struct file	*file,
1247 	struct dir_context *ctx)
1248 {
1249 	struct inode	*inode = file_inode(file);
1250 	xfs_inode_t	*ip = XFS_I(inode);
1251 	size_t		bufsize;
1252 
1253 	/*
1254 	 * The Linux API doesn't pass the total size of the buffer we
1255 	 * read into down to the filesystem.  With the filldir concept
1256 	 * it's not needed for correct information, but the XFS dir2 leaf
1257 	 * code wants an estimate of the buffer size to calculate its
1258 	 * readahead window and size the buffers used for mapping to
1259 	 * physical blocks.
1260 	 *
1261 	 * Try to give it an estimate that's good enough, maybe at some
1262 	 * point we can change the ->readdir prototype to include the
1263 	 * buffer size.  For now we use the current glibc buffer size.
1264 	 */
1265 	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1266 
1267 	return xfs_readdir(NULL, ip, ctx, bufsize);
1268 }
1269 
1270 STATIC loff_t
1271 xfs_file_llseek(
1272 	struct file	*file,
1273 	loff_t		offset,
1274 	int		whence)
1275 {
1276 	struct inode		*inode = file->f_mapping->host;
1277 
1278 	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1279 		return -EIO;
1280 
1281 	switch (whence) {
1282 	default:
1283 		return generic_file_llseek(file, offset, whence);
1284 	case SEEK_HOLE:
1285 		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1286 		break;
1287 	case SEEK_DATA:
1288 		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1289 		break;
1290 	}
1291 
1292 	if (offset < 0)
1293 		return offset;
1294 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1295 }
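
/*
 * Userspace view (illustrative): SEEK_DATA/SEEK_HOLE let readers walk a
 * sparse file extent by extent through the iomap lookups above:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *	// [data, hole) is the first data extent; repeat from 'hole'
 */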
1296 
1297 /*
1298  * Locking for serialisation of IO during page faults. This results in a lock
1299  * ordering of:
1300  *
1301  * mmap_lock (MM)
1302  *   sb_start_pagefault(vfs, freeze)
1303  *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1304  *       page_lock (MM)
1305  *         i_lock (XFS - extent map serialisation)
1306  */
1307 static vm_fault_t
1308 __xfs_filemap_fault(
1309 	struct vm_fault		*vmf,
1310 	enum page_entry_size	pe_size,
1311 	bool			write_fault)
1312 {
1313 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1314 	struct xfs_inode	*ip = XFS_I(inode);
1315 	vm_fault_t		ret;
1316 
1317 	trace_xfs_filemap_fault(ip, pe_size, write_fault);
1318 
1319 	if (write_fault) {
1320 		sb_start_pagefault(inode->i_sb);
1321 		file_update_time(vmf->vma->vm_file);
1322 	}
1323 
1324 	if (IS_DAX(inode)) {
1325 		pfn_t pfn;
1326 
1327 		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1328 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
1329 				(write_fault && !vmf->cow_page) ?
1330 				 &xfs_direct_write_iomap_ops :
1331 				 &xfs_read_iomap_ops);
1332 		if (ret & VM_FAULT_NEEDDSYNC)
1333 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
1334 		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1335 	} else {
1336 		if (write_fault) {
1337 			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1338 			ret = iomap_page_mkwrite(vmf,
1339 					&xfs_buffered_write_iomap_ops);
1340 			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1341 		} else {
1342 			ret = filemap_fault(vmf);
1343 		}
1344 	}
1345 
1346 	if (write_fault)
1347 		sb_end_pagefault(inode->i_sb);
1348 	return ret;
1349 }
1350 
1351 static inline bool
1352 xfs_is_write_fault(
1353 	struct vm_fault		*vmf)
1354 {
1355 	return (vmf->flags & FAULT_FLAG_WRITE) &&
1356 	       (vmf->vma->vm_flags & VM_SHARED);
1357 }
1358 
1359 static vm_fault_t
1360 xfs_filemap_fault(
1361 	struct vm_fault		*vmf)
1362 {
1363 	/* DAX can shortcut the normal fault path on write faults! */
1364 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
1365 			IS_DAX(file_inode(vmf->vma->vm_file)) &&
1366 			xfs_is_write_fault(vmf));
1367 }
1368 
1369 static vm_fault_t
1370 xfs_filemap_huge_fault(
1371 	struct vm_fault		*vmf,
1372 	enum page_entry_size	pe_size)
1373 {
1374 	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1375 		return VM_FAULT_FALLBACK;
1376 
1377 	/* DAX can shortcut the normal fault path on write faults! */
1378 	return __xfs_filemap_fault(vmf, pe_size,
1379 			xfs_is_write_fault(vmf));
1380 }
1381 
1382 static vm_fault_t
1383 xfs_filemap_page_mkwrite(
1384 	struct vm_fault		*vmf)
1385 {
1386 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1387 }
1388 
1389 /*
1390  * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1391  * on write faults. In reality, it needs to serialise against truncate and
1392  * prepare memory for writing so handle is as standard write fault.
1393  */
1394 static vm_fault_t
1395 xfs_filemap_pfn_mkwrite(
1396 	struct vm_fault		*vmf)
1397 {
1398 
1399 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1400 }
1401 
1402 static vm_fault_t
1403 xfs_filemap_map_pages(
1404 	struct vm_fault		*vmf,
1405 	pgoff_t			start_pgoff,
1406 	pgoff_t			end_pgoff)
1407 {
1408 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1409 	vm_fault_t ret;
1410 
1411 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1412 	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
1413 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1414 	return ret;
1415 }
1416 
1417 static const struct vm_operations_struct xfs_file_vm_ops = {
1418 	.fault		= xfs_filemap_fault,
1419 	.huge_fault	= xfs_filemap_huge_fault,
1420 	.map_pages	= xfs_filemap_map_pages,
1421 	.page_mkwrite	= xfs_filemap_page_mkwrite,
1422 	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1423 };
1424 
1425 STATIC int
1426 xfs_file_mmap(
1427 	struct file		*file,
1428 	struct vm_area_struct	*vma)
1429 {
1430 	struct inode		*inode = file_inode(file);
1431 	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1432 
1433 	/*
1434 	 * We don't support synchronous mappings for non-DAX files and
1435 	 * for DAX files if the underlying dax_device is not synchronous.
1436 	 */
1437 	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1438 		return -EOPNOTSUPP;
1439 
1440 	file_accessed(file);
1441 	vma->vm_ops = &xfs_file_vm_ops;
1442 	if (IS_DAX(inode))
1443 		vma->vm_flags |= VM_HUGEPAGE;
1444 	return 0;
1445 }
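
/*
 * Userspace view (illustrative): the daxdev_mapping_supported() check
 * above is what makes it safe to advertise MAP_SYNC; a caller would
 * request it with:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *	// fails unless the mapping can be made durably synchronous
 */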
1446 
1447 const struct file_operations xfs_file_operations = {
1448 	.llseek		= xfs_file_llseek,
1449 	.read_iter	= xfs_file_read_iter,
1450 	.write_iter	= xfs_file_write_iter,
1451 	.splice_read	= generic_file_splice_read,
1452 	.splice_write	= iter_file_splice_write,
1453 	.iopoll		= iocb_bio_iopoll,
1454 	.unlocked_ioctl	= xfs_file_ioctl,
1455 #ifdef CONFIG_COMPAT
1456 	.compat_ioctl	= xfs_file_compat_ioctl,
1457 #endif
1458 	.mmap		= xfs_file_mmap,
1459 	.mmap_supported_flags = MAP_SYNC,
1460 	.open		= xfs_file_open,
1461 	.release	= xfs_file_release,
1462 	.fsync		= xfs_file_fsync,
1463 	.get_unmapped_area = thp_get_unmapped_area,
1464 	.fallocate	= xfs_file_fallocate,
1465 	.fadvise	= xfs_file_fadvise,
1466 	.remap_file_range = xfs_file_remap_range,
1467 };
1468 
1469 const struct file_operations xfs_dir_file_operations = {
1470 	.open		= xfs_dir_open,
1471 	.read		= generic_read_dir,
1472 	.iterate_shared	= xfs_file_readdir,
1473 	.llseek		= generic_file_llseek,
1474 	.unlocked_ioctl	= xfs_file_ioctl,
1475 #ifdef CONFIG_COMPAT
1476 	.compat_ioctl	= xfs_file_compat_ioctl,
1477 #endif
1478 	.fsync		= xfs_dir_fsync,
1479 };
1480