TOMOYO Linux Cross Reference
Linux/fs/ext4/file.c

/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock_shared(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}
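
/*
 * Editor's illustration (not part of the original file): the unaligned-AIO
 * test above relies on s_blocksize being a power of two, so that
 * "x & (s_blocksize - 1)" yields x's offset within its block and is zero
 * only for block-aligned values.  A minimal stand-alone sketch of that
 * arithmetic follows; the helper name is hypothetical, and it folds the
 * whole iov_iter down to a single (pos, len) pair, whereas
 * iov_iter_alignment() also ORs in the base address and length of every
 * segment.
 */
#if 0
#include <stdbool.h>

/* For a 4096-byte block size, (pos 4096, len 8192) is aligned while
 * (pos 512, len 4096) is not, because 512 & 0xfff != 0. */
static bool example_io_is_unaligned(unsigned long long pos,
                                    unsigned long long len,
                                    unsigned int blocksize)
{
        unsigned long long blockmask = blocksize - 1;

        return ((pos | len) & blockmask) != 0;
}
#endif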

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been
         * preallocated, regardless of whether they have been initialized
         * or not. To exclude unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
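
/*
 * Editor's illustration (not part of the original file): the map.m_len
 * computed above via EXT4_MAX_BLOCKS() is, in effect, the number of
 * filesystem blocks touched by the byte range [pos, pos + len).  A
 * hypothetical helper spelling out that arithmetic:
 */
#if 0
/* For blkbits == 12 (4 KiB blocks), pos 1000 and len 5000 touch blocks
 * 0 and 1: first = 1000 >> 12 = 0, last = 5999 >> 12 = 1, so 2 blocks. */
static unsigned int example_blocks_spanned(unsigned long long pos,
                                           unsigned long long len,
                                           unsigned int blkbits)
{
        unsigned long long first = pos >> blkbits;
        unsigned long long last = (pos + len - 1) >> blkbits;

        return (unsigned int)(last - first + 1);
}
#endif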

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;
        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}
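
/*
 * Editor's illustration (not part of the original file): for bitmap-format
 * files the code above first rejects writes that start at or beyond
 * s_bitmap_maxbytes, then truncates the iov_iter so the write cannot cross
 * that limit.  The clamping arithmetic in isolation, with hypothetical
 * names and assuming pos < limit (the -EFBIG case was already handled):
 */
#if 0
/* With limit 1000, a 300-byte write at pos 900 is trimmed to 100 bytes. */
static unsigned long long example_clamp_count(unsigned long long pos,
                                              unsigned long long count,
                                              unsigned long long limit)
{
        return (count > limit - pos) ? limit - pos : count;
}
#endif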

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIOs must be serialized against each other, as
         * zeroing of partial blocks by two competing unaligned AIOs can
         * result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}
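
/*
 * Usage example (editor's sketch, not part of the original file): the
 * IOCB_NOWAIT branches above surface in user space as RWF_NOWAIT to
 * pwritev2(2), which fails with EAGAIN instead of blocking (in kernels of
 * this era, only for direct I/O).  Assumes a libc that exposes RWF_NOWAIT.
 */
#if 0
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Attempt a non-blocking write of buf; the caller retries or queues the
 * buffer elsewhere when the kernel reports EAGAIN. */
static ssize_t example_write_nowait(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        ssize_t ret = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);

        if (ret < 0 && errno == EAGAIN)
                fprintf(stderr, "would block, try again later\n");
        return ret;
}
#endif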

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        if (!IS_ERR(handle))
                result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
        else
                result = VM_FAULT_SIGBUS;
        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, so if it covered part of
 * the file we are working on, our pte will get unmapped and the pte_same()
 * check in wp_pfn_shared() fails. The fault then gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
        loff_t size;
        int ret;

        sb_start_pagefault(sb);
        file_update_time(vmf->vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vmf);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct dentry *dir;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        if (ext4_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }

        dir = dget_parent(file_dentry(filp));
        if (ext4_encrypted_inode(d_inode(dir)) &&
                        !fscrypt_has_permitted_context(d_inode(dir), inode)) {
                ext4_warning(inode->i_sb,
                             "Inconsistent encryption contexts: %lu/%lu",
                             (unsigned long) d_inode(dir)->i_ino,
                             (unsigned long) inode->i_ino);
                dput(dir);
                return -EPERM;
        }
        dput(dir);
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        /* Set the flags to support nowait AIO */
        filp->f_mode |= FMODE_AIO_NOWAIT;

        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or as a hole depending on whether the page
 * cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     ext4_lblk_t end_blk,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;

        index = startoff >> PAGE_SHIFT;
        end = (endoff - 1) >> PAGE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If current offset is smaller than the page offset,
                         * there is a hole at this offset.
                         */
                        if (whence == SEEK_HOLE && lastoff < endoff &&
                            lastoff < page_offset(pvec.pages[i])) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        if (page->index > end)
                                goto out;

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (lastoff + bh->b_size <= startoff)
                                        goto next;
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
next:
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /* Fewer pages were found than requested; we are done. */
                if (nr_pages < num)
                        break;

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

        if (whence == SEEK_HOLE && lastoff < endoff) {
                found = 1;
                *offset = lastoff;
        }
out:
        pagevec_release(&pvec);
        return found;
}
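
/*
 * Editor's illustration (not part of the original file): the index/end
 * computation at the top of ext4_find_unwritten_pgoff() turns a byte range
 * into an inclusive range of page-cache indices.  A hypothetical
 * stand-alone version of that conversion:
 */
#if 0
/* With 4 KiB pages (page_shift == 12), startoff 5000 lies in page 1 and
 * endoff 12288 ends in page 2, since the last byte of the range is 12287. */
static void example_byte_range_to_pages(unsigned long long startoff,
                                        unsigned long long endoff,
                                        unsigned int page_shift,
                                        unsigned long *index,
                                        unsigned long *end)
{
        *index = startoff >> page_shift;        /* first page to scan */
        *end = (endoff - 1) >> page_shift;      /* last page, inclusive */
}
#endif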

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset < 0 || offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret <= 0) {
                        /* No extent found -> no data */
                        if (ret == 0)
                                ret = -ENXIO;
                        inode_unlock(inode);
                        return ret;
                }

                last = es.es_lblk;
                if (last != start)
                        dataoff = (loff_t)last << blkbits;
                if (!ext4_es_is_unwritten(&es))
                        break;

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                              es.es_lblk + es.es_len, &dataoff))
                        break;
                last += es.es_len;
                dataoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset < 0 || offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret < 0) {
                        inode_unlock(inode);
                        return ret;
                }
                /* Found a hole? */
                if (ret == 0 || es.es_lblk > last) {
                        if (last != start)
                                holeoff = (loff_t)last << blkbits;
                        break;
                }
                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (ext4_es_is_unwritten(&es) &&
                    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                              last + es.es_len, &holeoff))
                        break;

                last += es.es_len;
                holeoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}
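
/*
 * Usage example (editor's sketch, not part of the original file): the
 * SEEK_DATA/SEEK_HOLE support implemented above is reached from user space
 * through lseek(2).  The program below walks the data segments of a
 * possibly sparse file named on the command line; it assumes a libc that
 * exposes SEEK_DATA/SEEK_HOLE (glibc needs _GNU_SOURCE).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        off_t data, hole = 0;
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        /* lseek() fails with ENXIO once no data remains past the offset. */
        while ((data = lseek(fd, hole, SEEK_DATA)) >= 0) {
                hole = lseek(fd, data, SEEK_HOLE);
                if (hole < 0)
                        break;
                printf("data: [%lld, %lld)\n",
                       (long long)data, (long long)hole);
        }
        close(fd);
        return 0;
}
#endif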

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};
