Linux/fs/xfs/xfs_file.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4  * All Rights Reserved.
  5  */
  6 #include "xfs.h"
  7 #include "xfs_fs.h"
  8 #include "xfs_shared.h"
  9 #include "xfs_format.h"
 10 #include "xfs_log_format.h"
 11 #include "xfs_trans_resv.h"
 12 #include "xfs_mount.h"
 13 #include "xfs_da_format.h"
 14 #include "xfs_da_btree.h"
 15 #include "xfs_inode.h"
 16 #include "xfs_trans.h"
 17 #include "xfs_inode_item.h"
 18 #include "xfs_bmap.h"
 19 #include "xfs_bmap_util.h"
 20 #include "xfs_error.h"
 21 #include "xfs_dir2.h"
 22 #include "xfs_dir2_priv.h"
 23 #include "xfs_ioctl.h"
 24 #include "xfs_trace.h"
 25 #include "xfs_log.h"
 26 #include "xfs_icache.h"
 27 #include "xfs_pnfs.h"
 28 #include "xfs_iomap.h"
 29 #include "xfs_reflink.h"
 30 
 31 #include <linux/dcache.h>
 32 #include <linux/falloc.h>
 33 #include <linux/pagevec.h>
 34 #include <linux/backing-dev.h>
 35 #include <linux/mman.h>
 36 #include <linux/fadvise.h>
 37 
 38 static const struct vm_operations_struct xfs_file_vm_ops;
 39 
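    /*
     * Set or clear the preallocation flag on an inode under a transaction.
     * Unless the update is marked invisible, also strip the setuid/setgid
     * bits and bump the timestamps; the commit is made synchronous when
     * XFS_PREALLOC_SYNC is requested.
     */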
 40 int
 41 xfs_update_prealloc_flags(
 42         struct xfs_inode        *ip,
 43         enum xfs_prealloc_flags flags)
 44 {
 45         struct xfs_trans        *tp;
 46         int                     error;
 47 
 48         error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
 49                         0, 0, 0, &tp);
 50         if (error)
 51                 return error;
 52 
 53         xfs_ilock(ip, XFS_ILOCK_EXCL);
 54         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 55 
 56         if (!(flags & XFS_PREALLOC_INVISIBLE)) {
 57                 VFS_I(ip)->i_mode &= ~S_ISUID;
 58                 if (VFS_I(ip)->i_mode & S_IXGRP)
 59                         VFS_I(ip)->i_mode &= ~S_ISGID;
 60                 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 61         }
 62 
 63         if (flags & XFS_PREALLOC_SET)
 64                 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
 65         if (flags & XFS_PREALLOC_CLEAR)
 66                 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
 67 
 68         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 69         if (flags & XFS_PREALLOC_SYNC)
 70                 xfs_trans_set_sync(tp);
 71         return xfs_trans_commit(tp);
 72 }
 73 
 74 /*
 75  * Fsync operations on directories are much simpler than on regular files:
 76  * there is no file data to flush, hence no need for explicit cache flush
 77  * operations, and there are no non-transaction metadata updates on
 78  * directories either.
 79  */
 80 STATIC int
 81 xfs_dir_fsync(
 82         struct file             *file,
 83         loff_t                  start,
 84         loff_t                  end,
 85         int                     datasync)
 86 {
 87         struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
 88         struct xfs_mount        *mp = ip->i_mount;
 89         xfs_lsn_t               lsn = 0;
 90 
 91         trace_xfs_dir_fsync(ip);
 92 
 93         xfs_ilock(ip, XFS_ILOCK_SHARED);
 94         if (xfs_ipincount(ip))
 95                 lsn = ip->i_itemp->ili_last_lsn;
 96         xfs_iunlock(ip, XFS_ILOCK_SHARED);
 97 
 98         if (!lsn)
 99                 return 0;
100         return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
101 }
102 
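    /*
     * Regular file fsync: write back and wait on the file data, flush the
     * relevant device write caches, then force the log up to the last LSN
     * that dirtied the inode so the logged metadata changes reach stable
     * storage.
     */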
103 STATIC int
104 xfs_file_fsync(
105         struct file             *file,
106         loff_t                  start,
107         loff_t                  end,
108         int                     datasync)
109 {
110         struct inode            *inode = file->f_mapping->host;
111         struct xfs_inode        *ip = XFS_I(inode);
112         struct xfs_mount        *mp = ip->i_mount;
113         int                     error = 0;
114         int                     log_flushed = 0;
115         xfs_lsn_t               lsn = 0;
116 
117         trace_xfs_file_fsync(ip);
118 
119         error = file_write_and_wait_range(file, start, end);
120         if (error)
121                 return error;
122 
123         if (XFS_FORCED_SHUTDOWN(mp))
124                 return -EIO;
125 
126         xfs_iflags_clear(ip, XFS_ITRUNCATED);
127 
128         /*
129          * If we have an RT and/or log subvolume we need to make sure to flush
130          * the write cache of the device used for file data first.  This is to
131          * ensure newly written file data makes it to disk before logging the new
132          * inode size in case of an extending write.
133          */
134         if (XFS_IS_REALTIME_INODE(ip))
135                 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
136         else if (mp->m_logdev_targp != mp->m_ddev_targp)
137                 xfs_blkdev_issue_flush(mp->m_ddev_targp);
138 
139         /*
140          * All metadata updates are logged, which means that we just have to
141          * flush the log up to the latest LSN that touched the inode. If we have
142          * concurrent fsync/fdatasync() calls, we need them to all block on the
143          * log force before we clear the ili_fsync_fields field. This ensures
144          * that we don't get a racing sync operation that does not wait for the
145          * metadata to hit the journal before returning. If we race with
146          * clearing the ili_fsync_fields, then all that will happen is the log
147          * force will do nothing as the lsn will already be on disk. We can't
148          * race with setting ili_fsync_fields because that is done under
149          * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
150          * until after the ili_fsync_fields is cleared.
151          */
152         xfs_ilock(ip, XFS_ILOCK_SHARED);
153         if (xfs_ipincount(ip)) {
154                 if (!datasync ||
155                     (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
156                         lsn = ip->i_itemp->ili_last_lsn;
157         }
158 
159         if (lsn) {
160                 error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
161                 ip->i_itemp->ili_fsync_fields = 0;
162         }
163         xfs_iunlock(ip, XFS_ILOCK_SHARED);
164 
165         /*
166          * If we only have a single device, and the log force above was
167          * a no-op, we might have to flush the data device cache here.
168          * This can only happen for fdatasync/O_DSYNC if we were overwriting
169          * an already allocated file and thus do not have any metadata to
170          * commit.
171          */
172         if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
173             mp->m_logdev_targp == mp->m_ddev_targp)
174                 xfs_blkdev_issue_flush(mp->m_ddev_targp);
175 
176         return error;
177 }
178 
179 STATIC ssize_t
180 xfs_file_dio_aio_read(
181         struct kiocb            *iocb,
182         struct iov_iter         *to)
183 {
184         struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
185         size_t                  count = iov_iter_count(to);
186         ssize_t                 ret;
187 
188         trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
189 
190         if (!count)
191                 return 0; /* skip atime */
192 
193         file_accessed(iocb->ki_filp);
194 
195         xfs_ilock(ip, XFS_IOLOCK_SHARED);
196         ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
197         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
198 
199         return ret;
200 }
201 
202 static noinline ssize_t
203 xfs_file_dax_read(
204         struct kiocb            *iocb,
205         struct iov_iter         *to)
206 {
207         struct xfs_inode        *ip = XFS_I(iocb->ki_filp->f_mapping->host);
208         size_t                  count = iov_iter_count(to);
209         ssize_t                 ret = 0;
210 
211         trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
212 
213         if (!count)
214                 return 0; /* skip atime */
215 
216         if (iocb->ki_flags & IOCB_NOWAIT) {
217                 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
218                         return -EAGAIN;
219         } else {
220                 xfs_ilock(ip, XFS_IOLOCK_SHARED);
221         }
222 
223         ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
224         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
225 
226         file_accessed(iocb->ki_filp);
227         return ret;
228 }
229 
230 STATIC ssize_t
231 xfs_file_buffered_aio_read(
232         struct kiocb            *iocb,
233         struct iov_iter         *to)
234 {
235         struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
236         ssize_t                 ret;
237 
238         trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
239 
240         if (iocb->ki_flags & IOCB_NOWAIT) {
241                 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
242                         return -EAGAIN;
243         } else {
244                 xfs_ilock(ip, XFS_IOLOCK_SHARED);
245         }
246         ret = generic_file_read_iter(iocb, to);
247         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
248 
249         return ret;
250 }
251 
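    /*
     * Top-level ->read_iter method: dispatch to the DAX, direct or buffered
     * read path and account the bytes read.
     */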
252 STATIC ssize_t
253 xfs_file_read_iter(
254         struct kiocb            *iocb,
255         struct iov_iter         *to)
256 {
257         struct inode            *inode = file_inode(iocb->ki_filp);
258         struct xfs_mount        *mp = XFS_I(inode)->i_mount;
259         ssize_t                 ret = 0;
260 
261         XFS_STATS_INC(mp, xs_read_calls);
262 
263         if (XFS_FORCED_SHUTDOWN(mp))
264                 return -EIO;
265 
266         if (IS_DAX(inode))
267                 ret = xfs_file_dax_read(iocb, to);
268         else if (iocb->ki_flags & IOCB_DIRECT)
269                 ret = xfs_file_dio_aio_read(iocb, to);
270         else
271                 ret = xfs_file_buffered_aio_read(iocb, to);
272 
273         if (ret > 0)
274                 XFS_STATS_ADD(mp, xs_read_bytes, ret);
275         return ret;
276 }
277 
278 /*
279  * Common pre-write limit and setup checks.
280  *
281  * Called with the iolock held either shared or exclusive according to
282  * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
283  * if called for a direct write beyond i_size.
284  */
285 STATIC ssize_t
286 xfs_file_aio_write_checks(
287         struct kiocb            *iocb,
288         struct iov_iter         *from,
289         int                     *iolock)
290 {
291         struct file             *file = iocb->ki_filp;
292         struct inode            *inode = file->f_mapping->host;
293         struct xfs_inode        *ip = XFS_I(inode);
294         ssize_t                 error = 0;
295         size_t                  count = iov_iter_count(from);
296         bool                    drained_dio = false;
297         loff_t                  isize;
298 
299 restart:
300         error = generic_write_checks(iocb, from);
301         if (error <= 0)
302                 return error;
303 
304         error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
305         if (error)
306                 return error;
307 
308         /*
309          * For changing security info in file_remove_privs() we need i_rwsem
310          * exclusively.
311          */
312         if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
313                 xfs_iunlock(ip, *iolock);
314                 *iolock = XFS_IOLOCK_EXCL;
315                 xfs_ilock(ip, *iolock);
316                 goto restart;
317         }
318         /*
319          * If the offset is beyond the size of the file, we need to zero any
320          * blocks that fall between the existing EOF and the start of this
321          * write.  If zeroing is needed and we are currently holding the
322          * iolock shared, we need to update it to exclusive, which implies
323          * having to redo all of the checks made so far.
324          *
325          * We need to serialise against EOF updates that occur in IO
326          * completions here. We want to make sure that nobody is changing the
327          * size while we do this check until we have placed an IO barrier (i.e.
328          * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
329          * The spinlock effectively forms a memory barrier once we have the
330          * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
331          * and hence be able to correctly determine if we need to run zeroing.
332          */
333         spin_lock(&ip->i_flags_lock);
334         isize = i_size_read(inode);
335         if (iocb->ki_pos > isize) {
336                 spin_unlock(&ip->i_flags_lock);
337                 if (!drained_dio) {
338                         if (*iolock == XFS_IOLOCK_SHARED) {
339                                 xfs_iunlock(ip, *iolock);
340                                 *iolock = XFS_IOLOCK_EXCL;
341                                 xfs_ilock(ip, *iolock);
342                                 iov_iter_reexpand(from, count);
343                         }
344                         /*
345                          * We now have an IO submission barrier in place, but
346                          * AIO can do EOF updates during IO completion and hence
347                          * we now need to wait for all of them to drain. Non-AIO
348                          * DIO will have drained before we are given the
349                          * XFS_IOLOCK_EXCL, and so for most cases this wait is a
350                          * no-op.
351                          */
352                         inode_dio_wait(inode);
353                         drained_dio = true;
354                         goto restart;
355                 }
356 
357                 trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
358                 error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
359                                 NULL, &xfs_iomap_ops);
360                 if (error)
361                         return error;
362         } else
363                 spin_unlock(&ip->i_flags_lock);
364 
365         /*
366          * Updating the timestamps will grab the ilock again from
367          * xfs_fs_dirty_inode, so we have to call it after dropping the
368          * lock above.  Eventually we should look into a way to avoid
369          * the pointless lock roundtrip.
370          */
371         if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
372                 error = file_update_time(file);
373                 if (error)
374                         return error;
375         }
376 
377         /*
378          * If we're writing the file then make sure to clear the setuid and
379          * setgid bits if the process is not being run by root.  This keeps
380          * people from modifying setuid and setgid binaries.
381          */
382         if (!IS_NOSEC(inode))
383                 return file_remove_privs(file);
384         return 0;
385 }
386 
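    /*
     * Direct write completion handler: account the bytes written, finish any
     * copy-on-write remapping, convert unwritten extents, and update the
     * in-core and on-disk file size if the write extended the file.
     */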
387 static int
388 xfs_dio_write_end_io(
389         struct kiocb            *iocb,
390         ssize_t                 size,
391         unsigned                flags)
392 {
393         struct inode            *inode = file_inode(iocb->ki_filp);
394         struct xfs_inode        *ip = XFS_I(inode);
395         loff_t                  offset = iocb->ki_pos;
396         int                     error = 0;
397 
398         trace_xfs_end_io_direct_write(ip, offset, size);
399 
400         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
401                 return -EIO;
402 
403         if (size <= 0)
404                 return size;
405 
406         /*
407          * Capture amount written on completion as we can't reliably account
408          * for it on submission.
409          */
410         XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
411 
412         if (flags & IOMAP_DIO_COW) {
413                 error = xfs_reflink_end_cow(ip, offset, size);
414                 if (error)
415                         return error;
416         }
417 
418         /*
419          * Unwritten conversion updates the in-core isize after extent
420          * conversion but before updating the on-disk size. Updating isize any
421          * earlier allows a racing dio read to find unwritten extents before
422          * they are converted.
423          */
424         if (flags & IOMAP_DIO_UNWRITTEN)
425                 return xfs_iomap_write_unwritten(ip, offset, size, true);
426 
427         /*
428          * We need to update the in-core inode size here so that we don't end up
429          * with the on-disk inode size being outside the in-core inode size. We
430          * have no other method of updating EOF for AIO, so always do it here
431          * if necessary.
432          *
433          * We need to lock the test/set EOF update as we can be racing with
434          * other IO completions here to update the EOF. Failing to serialise
435          * here can result in EOF moving backwards and Bad Things Happen when
436          * that occurs.
437          */
438         spin_lock(&ip->i_flags_lock);
439         if (offset + size > i_size_read(inode)) {
440                 i_size_write(inode, offset + size);
441                 spin_unlock(&ip->i_flags_lock);
442                 error = xfs_setfilesize(ip, offset, size);
443         } else {
444                 spin_unlock(&ip->i_flags_lock);
445         }
446 
447         return error;
448 }
449 
450 /*
451  * xfs_file_dio_aio_write - handle direct IO writes
452  *
453  * Lock the inode appropriately to prepare for and issue a direct IO write.
454  * By separating it from the buffered write path we remove all the
455  * tricky-to-follow locking changes and looping.
456  *
457  * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
458  * until we're sure the bytes at the new EOF have been zeroed and/or the cached
459  * pages are flushed out.
460  *
461  * In most cases the direct IO writes will be done holding IOLOCK_SHARED
462  * allowing them to be done in parallel with reads and other direct IO writes.
463  * However, if the IO is not aligned to filesystem blocks, the direct IO layer
464  * needs to do sub-block zeroing and that requires serialisation against other
465  * direct IOs to the same block. In this case we need to serialise the
466  * submission of the unaligned IOs so that we don't get racing block zeroing in
467  * the dio layer.  To avoid the problem with aio, we also need to wait for
468  * outstanding IOs to complete so that unwritten extent conversion is completed
469  * before we try to map the overlapping block. This is currently implemented by
470  * hitting it with a big hammer (i.e. inode_dio_wait()).
471  *
472  * Returns with locks held indicated by @iolock and errors indicated by
473  * negative return values.
474  */
475 STATIC ssize_t
476 xfs_file_dio_aio_write(
477         struct kiocb            *iocb,
478         struct iov_iter         *from)
479 {
480         struct file             *file = iocb->ki_filp;
481         struct address_space    *mapping = file->f_mapping;
482         struct inode            *inode = mapping->host;
483         struct xfs_inode        *ip = XFS_I(inode);
484         struct xfs_mount        *mp = ip->i_mount;
485         ssize_t                 ret = 0;
486         int                     unaligned_io = 0;
487         int                     iolock;
488         size_t                  count = iov_iter_count(from);
489         struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
490                                         mp->m_rtdev_targp : mp->m_ddev_targp;
491 
492         /* DIO must be aligned to device logical sector size */
493         if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
494                 return -EINVAL;
495 
496         /*
497          * Don't take the exclusive iolock here unless the I/O is unaligned to
498          * the file system block size.  We don't need to consider the EOF
499          * extension case here because xfs_file_aio_write_checks() will relock
500          * the inode as necessary for EOF zeroing cases and fill out the new
501          * inode size as appropriate.
502          */
503         if ((iocb->ki_pos & mp->m_blockmask) ||
504             ((iocb->ki_pos + count) & mp->m_blockmask)) {
505                 unaligned_io = 1;
506 
507                 /*
508                  * We can't properly handle unaligned direct I/O to reflink
509                  * files yet, as we can't unshare a partial block.
510                  */
511                 if (xfs_is_cow_inode(ip)) {
512                         trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
513                         return -EREMCHG;
514                 }
515                 iolock = XFS_IOLOCK_EXCL;
516         } else {
517                 iolock = XFS_IOLOCK_SHARED;
518         }
519 
520         if (iocb->ki_flags & IOCB_NOWAIT) {
521                 /* unaligned dio always waits, bail */
522                 if (unaligned_io)
523                         return -EAGAIN;
524                 if (!xfs_ilock_nowait(ip, iolock))
525                         return -EAGAIN;
526         } else {
527                 xfs_ilock(ip, iolock);
528         }
529 
530         ret = xfs_file_aio_write_checks(iocb, from, &iolock);
531         if (ret)
532                 goto out;
533         count = iov_iter_count(from);
534 
535         /*
536          * If we are doing unaligned IO, we can't allow any other overlapping IO
537          * in-flight at the same time or we risk data corruption. Wait for all
538          * other IO to drain before we submit. If the IO is aligned, demote the
539          * iolock if we had to take the exclusive lock in
540          * xfs_file_aio_write_checks() for other reasons.
541          */
542         if (unaligned_io) {
543                 inode_dio_wait(inode);
544         } else if (iolock == XFS_IOLOCK_EXCL) {
545                 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
546                 iolock = XFS_IOLOCK_SHARED;
547         }
548 
549         trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
550         ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
551 
552         /*
553          * If unaligned, this is the only IO in-flight. If it has not yet
554          * completed, wait on it before we release the iolock to prevent
555          * subsequent overlapping IO.
556          */
557         if (ret == -EIOCBQUEUED && unaligned_io)
558                 inode_dio_wait(inode);
559 out:
560         xfs_iunlock(ip, iolock);
561 
562         /*
563          * No fallback to buffered IO on errors for XFS, direct IO will either
564          * complete fully or fail.
565          */
566         ASSERT(ret < 0 || ret == count);
567         return ret;
568 }
569 
570 static noinline ssize_t
571 xfs_file_dax_write(
572         struct kiocb            *iocb,
573         struct iov_iter         *from)
574 {
575         struct inode            *inode = iocb->ki_filp->f_mapping->host;
576         struct xfs_inode        *ip = XFS_I(inode);
577         int                     iolock = XFS_IOLOCK_EXCL;
578         ssize_t                 ret, error = 0;
579         size_t                  count;
580         loff_t                  pos;
581 
582         if (iocb->ki_flags & IOCB_NOWAIT) {
583                 if (!xfs_ilock_nowait(ip, iolock))
584                         return -EAGAIN;
585         } else {
586                 xfs_ilock(ip, iolock);
587         }
588 
589         ret = xfs_file_aio_write_checks(iocb, from, &iolock);
590         if (ret)
591                 goto out;
592 
593         pos = iocb->ki_pos;
594         count = iov_iter_count(from);
595 
596         trace_xfs_file_dax_write(ip, count, pos);
597         ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
598         if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
599                 i_size_write(inode, iocb->ki_pos);
600                 error = xfs_setfilesize(ip, pos, ret);
601         }
602 out:
603         xfs_iunlock(ip, iolock);
604         if (error)
605                 return error;
606 
607         if (ret > 0) {
608                 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
609 
610                 /* Handle various SYNC-type writes */
611                 ret = generic_write_sync(iocb, ret);
612         }
613         return ret;
614 }
615 
616 STATIC ssize_t
617 xfs_file_buffered_aio_write(
618         struct kiocb            *iocb,
619         struct iov_iter         *from)
620 {
621         struct file             *file = iocb->ki_filp;
622         struct address_space    *mapping = file->f_mapping;
623         struct inode            *inode = mapping->host;
624         struct xfs_inode        *ip = XFS_I(inode);
625         ssize_t                 ret;
626         int                     enospc = 0;
627         int                     iolock;
628 
629         if (iocb->ki_flags & IOCB_NOWAIT)
630                 return -EOPNOTSUPP;
631 
632 write_retry:
633         iolock = XFS_IOLOCK_EXCL;
634         xfs_ilock(ip, iolock);
635 
636         ret = xfs_file_aio_write_checks(iocb, from, &iolock);
637         if (ret)
638                 goto out;
639 
640         /* We can write back this queue in page reclaim */
641         current->backing_dev_info = inode_to_bdi(inode);
642 
643         trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
644         ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
645         if (likely(ret >= 0))
646                 iocb->ki_pos += ret;
647 
648         /*
649          * If we hit a space limit, try to free up some lingering preallocated
650          * space before returning an error. In the case of ENOSPC, first try to
651          * write back all dirty inodes to free up some of the excess reserved
652          * metadata space. This reduces the chances that the eofblocks scan
653          * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
654          * also behaves as a filter to prevent too many eofblocks scans from
655          * running at the same time.
656          */
657         if (ret == -EDQUOT && !enospc) {
658                 xfs_iunlock(ip, iolock);
659                 enospc = xfs_inode_free_quota_eofblocks(ip);
660                 if (enospc)
661                         goto write_retry;
662                 enospc = xfs_inode_free_quota_cowblocks(ip);
663                 if (enospc)
664                         goto write_retry;
665                 iolock = 0;
666         } else if (ret == -ENOSPC && !enospc) {
667                 struct xfs_eofblocks eofb = {0};
668 
669                 enospc = 1;
670                 xfs_flush_inodes(ip->i_mount);
671 
672                 xfs_iunlock(ip, iolock);
673                 eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
674                 xfs_icache_free_eofblocks(ip->i_mount, &eofb);
675                 xfs_icache_free_cowblocks(ip->i_mount, &eofb);
676                 goto write_retry;
677         }
678 
679         current->backing_dev_info = NULL;
680 out:
681         if (iolock)
682                 xfs_iunlock(ip, iolock);
683 
684         if (ret > 0) {
685                 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
686                 /* Handle various SYNC-type writes */
687                 ret = generic_write_sync(iocb, ret);
688         }
689         return ret;
690 }
691 
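    /*
     * Top-level ->write_iter method: dispatch to the DAX, direct or buffered
     * write path. An unaligned direct write to shared (reflink) blocks
     * returns -EREMCHG and is retried through the buffered path.
     */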
692 STATIC ssize_t
693 xfs_file_write_iter(
694         struct kiocb            *iocb,
695         struct iov_iter         *from)
696 {
697         struct file             *file = iocb->ki_filp;
698         struct address_space    *mapping = file->f_mapping;
699         struct inode            *inode = mapping->host;
700         struct xfs_inode        *ip = XFS_I(inode);
701         ssize_t                 ret;
702         size_t                  ocount = iov_iter_count(from);
703 
704         XFS_STATS_INC(ip->i_mount, xs_write_calls);
705 
706         if (ocount == 0)
707                 return 0;
708 
709         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
710                 return -EIO;
711 
712         if (IS_DAX(inode))
713                 return xfs_file_dax_write(iocb, from);
714 
715         if (iocb->ki_flags & IOCB_DIRECT) {
716                 /*
717                  * Allow a directio write to fall back to a buffered
718                  * write *only* in the case that we're doing a reflink
719                  * CoW.  In all other directio scenarios we do not
720                  * allow an operation to fall back to buffered mode.
721                  */
722                 ret = xfs_file_dio_aio_write(iocb, from);
723                 if (ret != -EREMCHG)
724                         return ret;
725         }
726 
727         return xfs_file_buffered_aio_write(iocb, from);
728 }
729 
730 static void
731 xfs_wait_dax_page(
732         struct inode            *inode)
733 {
734         struct xfs_inode        *ip = XFS_I(inode);
735 
736         xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
737         schedule();
738         xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
739 }
740 
741 static int
742 xfs_break_dax_layouts(
743         struct inode            *inode,
744         bool                    *retry)
745 {
746         struct page             *page;
747 
748         ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
749 
750         page = dax_layout_busy_page(inode->i_mapping);
751         if (!page)
752                 return 0;
753 
754         *retry = true;
755         return ___wait_var_event(&page->_refcount,
756                         atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
757                         0, 0, xfs_wait_dax_page(inode));
758 }
759 
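    /*
     * Break anything that pins the current file layout (busy DAX pages
     * and/or layout leases, depending on @reason), retrying until no
     * conflicts remain.
     */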
760 int
761 xfs_break_layouts(
762         struct inode            *inode,
763         uint                    *iolock,
764         enum layout_break_reason reason)
765 {
766         bool                    retry;
767         int                     error;
768 
769         ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
770 
771         do {
772                 retry = false;
773                 switch (reason) {
774                 case BREAK_UNMAP:
775                         error = xfs_break_dax_layouts(inode, &retry);
776                         if (error || retry)
777                                 break;
778                         /* fall through */
779                 case BREAK_WRITE:
780                         error = xfs_break_leased_layouts(inode, iolock, &retry);
781                         break;
782                 default:
783                         WARN_ON_ONCE(1);
784                         error = -EINVAL;
785                 }
786         } while (error == 0 && retry);
787 
788         return error;
789 }
790 
791 #define XFS_FALLOC_FL_SUPPORTED                                         \
792                 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
793                  FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |      \
794                  FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
795 
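    /*
     * ->fallocate method: punch, collapse, insert, zero, unshare or
     * preallocate the requested range, then update the prealloc flags and,
     * if needed, the file size.
     */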
796 STATIC long
797 xfs_file_fallocate(
798         struct file             *file,
799         int                     mode,
800         loff_t                  offset,
801         loff_t                  len)
802 {
803         struct inode            *inode = file_inode(file);
804         struct xfs_inode        *ip = XFS_I(inode);
805         long                    error;
806         enum xfs_prealloc_flags flags = 0;
807         uint                    iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
808         loff_t                  new_size = 0;
809         bool                    do_file_insert = false;
810 
811         if (!S_ISREG(inode->i_mode))
812                 return -EINVAL;
813         if (mode & ~XFS_FALLOC_FL_SUPPORTED)
814                 return -EOPNOTSUPP;
815 
816         xfs_ilock(ip, iolock);
817         error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
818         if (error)
819                 goto out_unlock;
820 
821         if (mode & FALLOC_FL_PUNCH_HOLE) {
822                 error = xfs_free_file_space(ip, offset, len);
823                 if (error)
824                         goto out_unlock;
825         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
826                 unsigned int blksize_mask = i_blocksize(inode) - 1;
827 
828                 if (offset & blksize_mask || len & blksize_mask) {
829                         error = -EINVAL;
830                         goto out_unlock;
831                 }
832 
833                 /*
834                  * The range to collapse must lie entirely below EOF;
835                  * collapsing up to or past EOF would effectively be a
836                  * truncate operation.
836                  */
837                 if (offset + len >= i_size_read(inode)) {
838                         error = -EINVAL;
839                         goto out_unlock;
840                 }
841 
842                 new_size = i_size_read(inode) - len;
843 
844                 error = xfs_collapse_file_space(ip, offset, len);
845                 if (error)
846                         goto out_unlock;
847         } else if (mode & FALLOC_FL_INSERT_RANGE) {
848                 unsigned int    blksize_mask = i_blocksize(inode) - 1;
849                 loff_t          isize = i_size_read(inode);
850 
851                 if (offset & blksize_mask || len & blksize_mask) {
852                         error = -EINVAL;
853                         goto out_unlock;
854                 }
855 
856                 /*
857                  * New inode size must not exceed ->s_maxbytes, accounting for
858                  * possible signed overflow.
859                  */
860                 if (inode->i_sb->s_maxbytes - isize < len) {
861                         error = -EFBIG;
862                         goto out_unlock;
863                 }
864                 new_size = isize + len;
865 
866                 /* Offset should be less than i_size */
867                 if (offset >= isize) {
868                         error = -EINVAL;
869                         goto out_unlock;
870                 }
871                 do_file_insert = true;
872         } else {
873                 flags |= XFS_PREALLOC_SET;
874 
875                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
876                     offset + len > i_size_read(inode)) {
877                         new_size = offset + len;
878                         error = inode_newsize_ok(inode, new_size);
879                         if (error)
880                                 goto out_unlock;
881                 }
882 
883                 if (mode & FALLOC_FL_ZERO_RANGE) {
884                         error = xfs_zero_file_space(ip, offset, len);
885                 } else if (mode & FALLOC_FL_UNSHARE_RANGE) {
886                         error = xfs_reflink_unshare(ip, offset, len);
887                         if (error)
888                                 goto out_unlock;
889 
890                         if (!xfs_is_always_cow_inode(ip)) {
891                                 error = xfs_alloc_file_space(ip, offset, len,
892                                                 XFS_BMAPI_PREALLOC);
893                         }
894                 } else {
895                         /*
896                          * In always_cow mode we can't use preallocations and
897                          * thus should not create them.
898                          */
899                         if (xfs_is_always_cow_inode(ip)) {
900                                 error = -EOPNOTSUPP;
901                                 goto out_unlock;
902                         }
903 
904                         error = xfs_alloc_file_space(ip, offset, len,
905                                                      XFS_BMAPI_PREALLOC);
906                 }
907                 if (error)
908                         goto out_unlock;
909         }
910 
911         if (file->f_flags & O_DSYNC)
912                 flags |= XFS_PREALLOC_SYNC;
913 
914         error = xfs_update_prealloc_flags(ip, flags);
915         if (error)
916                 goto out_unlock;
917 
918         /* Change file size if needed */
919         if (new_size) {
920                 struct iattr iattr;
921 
922                 iattr.ia_valid = ATTR_SIZE;
923                 iattr.ia_size = new_size;
924                 error = xfs_vn_setattr_size(file_dentry(file), &iattr);
925                 if (error)
926                         goto out_unlock;
927         }
928 
929         /*
930          * Perform hole insertion now that the file size has been
931          * updated so that if we crash during the operation we don't
932          * leave shifted extents past EOF and hence lose access to
933          * the data that is contained within them.
934          */
935         if (do_file_insert)
936                 error = xfs_insert_file_space(ip, offset, len);
937 
938 out_unlock:
939         xfs_iunlock(ip, iolock);
940         return error;
941 }
942 
943 STATIC int
944 xfs_file_fadvise(
945         struct file     *file,
946         loff_t          start,
947         loff_t          end,
948         int             advice)
949 {
950         struct xfs_inode *ip = XFS_I(file_inode(file));
951         int ret;
952         int lockflags = 0;
953 
954         /*
955          * Operations creating pages in page cache need protection from hole
956          * punching and similar ops
957          */
958         if (advice == POSIX_FADV_WILLNEED) {
959                 lockflags = XFS_IOLOCK_SHARED;
960                 xfs_ilock(ip, lockflags);
961         }
962         ret = generic_fadvise(file, start, end, advice);
963         if (lockflags)
964                 xfs_iunlock(ip, lockflags);
965         return ret;
966 }
967 
968 STATIC loff_t
969 xfs_file_remap_range(
970         struct file             *file_in,
971         loff_t                  pos_in,
972         struct file             *file_out,
973         loff_t                  pos_out,
974         loff_t                  len,
975         unsigned int            remap_flags)
976 {
977         struct inode            *inode_in = file_inode(file_in);
978         struct xfs_inode        *src = XFS_I(inode_in);
979         struct inode            *inode_out = file_inode(file_out);
980         struct xfs_inode        *dest = XFS_I(inode_out);
981         struct xfs_mount        *mp = src->i_mount;
982         loff_t                  remapped = 0;
983         xfs_extlen_t            cowextsize;
984         int                     ret;
985 
986         if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
987                 return -EINVAL;
988 
989         if (!xfs_sb_version_hasreflink(&mp->m_sb))
990                 return -EOPNOTSUPP;
991 
992         if (XFS_FORCED_SHUTDOWN(mp))
993                 return -EIO;
994 
995         /* Prepare and then clone file data. */
996         ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
997                         &len, remap_flags);
998         if (ret < 0 || len == 0)
999                 return ret;
1000 
1001         trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1002 
1003         ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1004                         &remapped);
1005         if (ret)
1006                 goto out_unlock;
1007 
1008         /*
1009          * Carry the cowextsize hint from src to dest if we're sharing the
1010          * entire source file to the entire destination file, the source file
1011          * has a cowextsize hint, and the destination file does not.
1012          */
1013         cowextsize = 0;
1014         if (pos_in == 0 && len == i_size_read(inode_in) &&
1015             (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1016             pos_out == 0 && len >= i_size_read(inode_out) &&
1017             !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
1018                 cowextsize = src->i_d.di_cowextsize;
1019 
1020         ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1021                         remap_flags);
1022 
1023 out_unlock:
1024         xfs_reflink_remap_unlock(file_in, file_out);
1025         if (ret)
1026                 trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1027         return remapped > 0 ? remapped : ret;
1028 }
1029 
1030 STATIC int
1031 xfs_file_open(
1032         struct inode    *inode,
1033         struct file     *file)
1034 {
1035         if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1036                 return -EFBIG;
1037         if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
1038                 return -EIO;
1039         file->f_mode |= FMODE_NOWAIT;
1040         return 0;
1041 }
1042 
1043 STATIC int
1044 xfs_dir_open(
1045         struct inode    *inode,
1046         struct file     *file)
1047 {
1048         struct xfs_inode *ip = XFS_I(inode);
1049         int             mode;
1050         int             error;
1051 
1052         error = xfs_file_open(inode, file);
1053         if (error)
1054                 return error;
1055 
1056         /*
1057          * If there are any blocks, read-ahead block 0 as we're almost
1058          * certain to have the next operation be a read there.
1059          */
1060         mode = xfs_ilock_data_map_shared(ip);
1061         if (ip->i_d.di_nextents > 0)
1062                 error = xfs_dir3_data_readahead(ip, 0, -1);
1063         xfs_iunlock(ip, mode);
1064         return error;
1065 }
1066 
1067 STATIC int
1068 xfs_file_release(
1069         struct inode    *inode,
1070         struct file     *filp)
1071 {
1072         return xfs_release(XFS_I(inode));
1073 }
1074 
1075 STATIC int
1076 xfs_file_readdir(
1077         struct file     *file,
1078         struct dir_context *ctx)
1079 {
1080         struct inode    *inode = file_inode(file);
1081         xfs_inode_t     *ip = XFS_I(inode);
1082         size_t          bufsize;
1083 
1084         /*
1085          * The Linux API doesn't pass the total size of the buffer
1086          * we read into down to the filesystem.  With the filldir concept
1087          * it's not needed for correct information, but the XFS dir2 leaf
1088          * code wants an estimate of the buffer size to calculate its
1089          * readahead window and size the buffers used for mapping to
1090          * physical blocks.
1091          *
1092          * Try to give it an estimate that's good enough; maybe at some
1093          * point we can change the ->readdir prototype to include the
1094          * buffer size.  For now we use the current glibc buffer size.
1095          */
1096         bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);
1097 
1098         return xfs_readdir(NULL, ip, ctx, bufsize);
1099 }
1100 
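     /*
      * ->llseek method: SEEK_HOLE and SEEK_DATA are resolved through the
      * iomap seek helpers; all other whence values use generic_file_llseek().
      */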
1101 STATIC loff_t
1102 xfs_file_llseek(
1103         struct file     *file,
1104         loff_t          offset,
1105         int             whence)
1106 {
1107         struct inode            *inode = file->f_mapping->host;
1108 
1109         if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
1110                 return -EIO;
1111 
1112         switch (whence) {
1113         default:
1114                 return generic_file_llseek(file, offset, whence);
1115         case SEEK_HOLE:
1116                 offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1117                 break;
1118         case SEEK_DATA:
1119                 offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1120                 break;
1121         }
1122 
1123         if (offset < 0)
1124                 return offset;
1125         return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1126 }
1127 
1128 /*
1129  * Locking for serialisation of IO during page faults. This results in a lock
1130  * ordering of:
1131  *
1132  * mmap_sem (MM)
1133  *   sb_start_pagefault(vfs, freeze)
1134  *     i_mmaplock (XFS - truncate serialisation)
1135  *       page_lock (MM)
1136  *         i_lock (XFS - extent map serialisation)
1137  */
1138 static vm_fault_t
1139 __xfs_filemap_fault(
1140         struct vm_fault         *vmf,
1141         enum page_entry_size    pe_size,
1142         bool                    write_fault)
1143 {
1144         struct inode            *inode = file_inode(vmf->vma->vm_file);
1145         struct xfs_inode        *ip = XFS_I(inode);
1146         vm_fault_t              ret;
1147 
1148         trace_xfs_filemap_fault(ip, pe_size, write_fault);
1149 
1150         if (write_fault) {
1151                 sb_start_pagefault(inode->i_sb);
1152                 file_update_time(vmf->vma->vm_file);
1153         }
1154 
1155         xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1156         if (IS_DAX(inode)) {
1157                 pfn_t pfn;
1158 
1159                 ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
1160                 if (ret & VM_FAULT_NEEDDSYNC)
1161                         ret = dax_finish_sync_fault(vmf, pe_size, pfn);
1162         } else {
1163                 if (write_fault)
1164                         ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
1165                 else
1166                         ret = filemap_fault(vmf);
1167         }
1168         xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1169 
1170         if (write_fault)
1171                 sb_end_pagefault(inode->i_sb);
1172         return ret;
1173 }
1174 
1175 static vm_fault_t
1176 xfs_filemap_fault(
1177         struct vm_fault         *vmf)
1178 {
1179         /* DAX can shortcut the normal fault path on write faults! */
1180         return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
1181                         IS_DAX(file_inode(vmf->vma->vm_file)) &&
1182                         (vmf->flags & FAULT_FLAG_WRITE));
1183 }
1184 
1185 static vm_fault_t
1186 xfs_filemap_huge_fault(
1187         struct vm_fault         *vmf,
1188         enum page_entry_size    pe_size)
1189 {
1190         if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1191                 return VM_FAULT_FALLBACK;
1192 
1193         /* DAX can shortcut the normal fault path on write faults! */
1194         return __xfs_filemap_fault(vmf, pe_size,
1195                         (vmf->flags & FAULT_FLAG_WRITE));
1196 }
1197 
1198 static vm_fault_t
1199 xfs_filemap_page_mkwrite(
1200         struct vm_fault         *vmf)
1201 {
1202         return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1203 }
1204 
1205 /*
1206  * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1207  * on write faults. In reality, it needs to serialise against truncate and
1208  * prepare memory for writing so handle it as a standard write fault.
1209  */
1210 static vm_fault_t
1211 xfs_filemap_pfn_mkwrite(
1212         struct vm_fault         *vmf)
1213 {
1214 
1215         return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1216 }
1217 
1218 static const struct vm_operations_struct xfs_file_vm_ops = {
1219         .fault          = xfs_filemap_fault,
1220         .huge_fault     = xfs_filemap_huge_fault,
1221         .map_pages      = filemap_map_pages,
1222         .page_mkwrite   = xfs_filemap_page_mkwrite,
1223         .pfn_mkwrite    = xfs_filemap_pfn_mkwrite,
1224 };
1225 
1226 STATIC int
1227 xfs_file_mmap(
1228         struct file     *filp,
1229         struct vm_area_struct *vma)
1230 {
1231         /*
1232          * We don't support synchronous mappings for non-DAX files. At least
1233          * until someone comes up with a sensible use case.
1234          */
1235         if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
1236                 return -EOPNOTSUPP;
1237 
1238         file_accessed(filp);
1239         vma->vm_ops = &xfs_file_vm_ops;
1240         if (IS_DAX(file_inode(filp)))
1241                 vma->vm_flags |= VM_HUGEPAGE;
1242         return 0;
1243 }
1244 
1245 const struct file_operations xfs_file_operations = {
1246         .llseek         = xfs_file_llseek,
1247         .read_iter      = xfs_file_read_iter,
1248         .write_iter     = xfs_file_write_iter,
1249         .splice_read    = generic_file_splice_read,
1250         .splice_write   = iter_file_splice_write,
1251         .iopoll         = iomap_dio_iopoll,
1252         .unlocked_ioctl = xfs_file_ioctl,
1253 #ifdef CONFIG_COMPAT
1254         .compat_ioctl   = xfs_file_compat_ioctl,
1255 #endif
1256         .mmap           = xfs_file_mmap,
1257         .mmap_supported_flags = MAP_SYNC,
1258         .open           = xfs_file_open,
1259         .release        = xfs_file_release,
1260         .fsync          = xfs_file_fsync,
1261         .get_unmapped_area = thp_get_unmapped_area,
1262         .fallocate      = xfs_file_fallocate,
1263         .fadvise        = xfs_file_fadvise,
1264         .remap_file_range = xfs_file_remap_range,
1265 };
1266 
1267 const struct file_operations xfs_dir_file_operations = {
1268         .open           = xfs_dir_open,
1269         .read           = generic_read_dir,
1270         .iterate_shared = xfs_file_readdir,
1271         .llseek         = generic_file_llseek,
1272         .unlocked_ioctl = xfs_file_ioctl,
1273 #ifdef CONFIG_COMPAT
1274         .compat_ioctl   = xfs_file_compat_ioctl,
1275 #endif
1276         .fsync          = xfs_dir_fsync,
1277 };
1278 
