~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/btrfs/inode.c

Version: ~ [ linux-4.19-rc7 ] ~ [ linux-4.18.12 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.74 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.131 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.159 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.123 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.59 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.31.14 ] ~ [ linux-2.6.30.10 ] ~ [ linux-2.6.29.6 ] ~ [ linux-2.6.28.10 ] ~ [ linux-2.6.27.62 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (C) 2007 Oracle.  All rights reserved.
  4  */
  5 
  6 #include <linux/kernel.h>
  7 #include <linux/bio.h>
  8 #include <linux/buffer_head.h>
  9 #include <linux/file.h>
 10 #include <linux/fs.h>
 11 #include <linux/pagemap.h>
 12 #include <linux/highmem.h>
 13 #include <linux/time.h>
 14 #include <linux/init.h>
 15 #include <linux/string.h>
 16 #include <linux/backing-dev.h>
 17 #include <linux/writeback.h>
 18 #include <linux/compat.h>
 19 #include <linux/xattr.h>
 20 #include <linux/posix_acl.h>
 21 #include <linux/falloc.h>
 22 #include <linux/slab.h>
 23 #include <linux/ratelimit.h>
 24 #include <linux/btrfs.h>
 25 #include <linux/blkdev.h>
 26 #include <linux/posix_acl_xattr.h>
 27 #include <linux/uio.h>
 28 #include <linux/magic.h>
 29 #include <linux/iversion.h>
 30 #include <asm/unaligned.h>
 31 #include "ctree.h"
 32 #include "disk-io.h"
 33 #include "transaction.h"
 34 #include "btrfs_inode.h"
 35 #include "print-tree.h"
 36 #include "ordered-data.h"
 37 #include "xattr.h"
 38 #include "tree-log.h"
 39 #include "volumes.h"
 40 #include "compression.h"
 41 #include "locking.h"
 42 #include "free-space-cache.h"
 43 #include "inode-map.h"
 44 #include "backref.h"
 45 #include "props.h"
 46 #include "qgroup.h"
 47 #include "dedupe.h"
 48 
 49 struct btrfs_iget_args {
 50         struct btrfs_key *location;
 51         struct btrfs_root *root;
 52 };
 53 
 54 struct btrfs_dio_data {
 55         u64 reserve;
 56         u64 unsubmitted_oe_range_start;
 57         u64 unsubmitted_oe_range_end;
 58         int overwrite;
 59 };
 60 
 61 static const struct inode_operations btrfs_dir_inode_operations;
 62 static const struct inode_operations btrfs_symlink_inode_operations;
 63 static const struct inode_operations btrfs_dir_ro_inode_operations;
 64 static const struct inode_operations btrfs_special_inode_operations;
 65 static const struct inode_operations btrfs_file_inode_operations;
 66 static const struct address_space_operations btrfs_aops;
 67 static const struct address_space_operations btrfs_symlink_aops;
 68 static const struct file_operations btrfs_dir_file_operations;
 69 static const struct extent_io_ops btrfs_extent_io_ops;
 70 
 71 static struct kmem_cache *btrfs_inode_cachep;
 72 struct kmem_cache *btrfs_trans_handle_cachep;
 73 struct kmem_cache *btrfs_path_cachep;
 74 struct kmem_cache *btrfs_free_space_cachep;
 75 
 76 #define S_SHIFT 12
 77 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 78         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
 79         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
 80         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
 81         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
 82         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
 83         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
 84         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
 85 };
 86 
 87 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
 88 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
 89 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 90 static noinline int cow_file_range(struct inode *inode,
 91                                    struct page *locked_page,
 92                                    u64 start, u64 end, u64 delalloc_end,
 93                                    int *page_started, unsigned long *nr_written,
 94                                    int unlock, struct btrfs_dedupe_hash *hash);
 95 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
 96                                        u64 orig_start, u64 block_start,
 97                                        u64 block_len, u64 orig_block_len,
 98                                        u64 ram_bytes, int compress_type,
 99                                        int type);
100 
101 static void __endio_write_update_ordered(struct inode *inode,
102                                          const u64 offset, const u64 bytes,
103                                          const bool uptodate);
104 
105 /*
106  * Cleanup all submitted ordered extents in specified range to handle errors
107  * from the fill_dellaloc() callback.
108  *
109  * NOTE: caller must ensure that when an error happens, it can not call
110  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
111  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
112  * to be released, which we want to happen only when finishing the ordered
113  * extent (btrfs_finish_ordered_io()). Also note that the caller of the
114  * fill_delalloc() callback already does proper cleanup for the first page of
115  * the range, that is, it invokes the callback writepage_end_io_hook() for the
116  * range of the first page.
117  */
118 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
119                                                  const u64 offset,
120                                                  const u64 bytes)
121 {
122         unsigned long index = offset >> PAGE_SHIFT;
123         unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
124         struct page *page;
125 
126         while (index <= end_index) {
127                 page = find_get_page(inode->i_mapping, index);
128                 index++;
129                 if (!page)
130                         continue;
131                 ClearPagePrivate2(page);
132                 put_page(page);
133         }
134         return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
135                                             bytes - PAGE_SIZE, false);
136 }
137 
138 static int btrfs_dirty_inode(struct inode *inode);
139 
140 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
141 void btrfs_test_inode_set_ops(struct inode *inode)
142 {
143         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
144 }
145 #endif
146 
147 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
148                                      struct inode *inode,  struct inode *dir,
149                                      const struct qstr *qstr)
150 {
151         int err;
152 
153         err = btrfs_init_acl(trans, inode, dir);
154         if (!err)
155                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
156         return err;
157 }
158 
159 /*
160  * this does all the hard work for inserting an inline extent into
161  * the btree.  The caller should have done a btrfs_drop_extents so that
162  * no overlapping inline items exist in the btree
163  */
164 static int insert_inline_extent(struct btrfs_trans_handle *trans,
165                                 struct btrfs_path *path, int extent_inserted,
166                                 struct btrfs_root *root, struct inode *inode,
167                                 u64 start, size_t size, size_t compressed_size,
168                                 int compress_type,
169                                 struct page **compressed_pages)
170 {
171         struct extent_buffer *leaf;
172         struct page *page = NULL;
173         char *kaddr;
174         unsigned long ptr;
175         struct btrfs_file_extent_item *ei;
176         int ret;
177         size_t cur_size = size;
178         unsigned long offset;
179 
180         if (compressed_size && compressed_pages)
181                 cur_size = compressed_size;
182 
183         inode_add_bytes(inode, size);
184 
185         if (!extent_inserted) {
186                 struct btrfs_key key;
187                 size_t datasize;
188 
189                 key.objectid = btrfs_ino(BTRFS_I(inode));
190                 key.offset = start;
191                 key.type = BTRFS_EXTENT_DATA_KEY;
192 
193                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
194                 path->leave_spinning = 1;
195                 ret = btrfs_insert_empty_item(trans, root, path, &key,
196                                               datasize);
197                 if (ret)
198                         goto fail;
199         }
200         leaf = path->nodes[0];
201         ei = btrfs_item_ptr(leaf, path->slots[0],
202                             struct btrfs_file_extent_item);
203         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
204         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
205         btrfs_set_file_extent_encryption(leaf, ei, 0);
206         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
207         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
208         ptr = btrfs_file_extent_inline_start(ei);
209 
210         if (compress_type != BTRFS_COMPRESS_NONE) {
211                 struct page *cpage;
212                 int i = 0;
213                 while (compressed_size > 0) {
214                         cpage = compressed_pages[i];
215                         cur_size = min_t(unsigned long, compressed_size,
216                                        PAGE_SIZE);
217 
218                         kaddr = kmap_atomic(cpage);
219                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
220                         kunmap_atomic(kaddr);
221 
222                         i++;
223                         ptr += cur_size;
224                         compressed_size -= cur_size;
225                 }
226                 btrfs_set_file_extent_compression(leaf, ei,
227                                                   compress_type);
228         } else {
229                 page = find_get_page(inode->i_mapping,
230                                      start >> PAGE_SHIFT);
231                 btrfs_set_file_extent_compression(leaf, ei, 0);
232                 kaddr = kmap_atomic(page);
233                 offset = start & (PAGE_SIZE - 1);
234                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
235                 kunmap_atomic(kaddr);
236                 put_page(page);
237         }
238         btrfs_mark_buffer_dirty(leaf);
239         btrfs_release_path(path);
240 
241         /*
242          * we're an inline extent, so nobody can
243          * extend the file past i_size without locking
244          * a page we already have locked.
245          *
246          * We must do any isize and inode updates
247          * before we unlock the pages.  Otherwise we
248          * could end up racing with unlink.
249          */
250         BTRFS_I(inode)->disk_i_size = inode->i_size;
251         ret = btrfs_update_inode(trans, root, inode);
252 
253 fail:
254         return ret;
255 }
256 
257 
258 /*
259  * conditionally insert an inline extent into the file.  This
260  * does the checks required to make sure the data is small enough
261  * to fit as an inline extent.
262  */
263 static noinline int cow_file_range_inline(struct inode *inode, u64 start,
264                                           u64 end, size_t compressed_size,
265                                           int compress_type,
266                                           struct page **compressed_pages)
267 {
268         struct btrfs_root *root = BTRFS_I(inode)->root;
269         struct btrfs_fs_info *fs_info = root->fs_info;
270         struct btrfs_trans_handle *trans;
271         u64 isize = i_size_read(inode);
272         u64 actual_end = min(end + 1, isize);
273         u64 inline_len = actual_end - start;
274         u64 aligned_end = ALIGN(end, fs_info->sectorsize);
275         u64 data_len = inline_len;
276         int ret;
277         struct btrfs_path *path;
278         int extent_inserted = 0;
279         u32 extent_item_size;
280 
281         if (compressed_size)
282                 data_len = compressed_size;
283 
284         if (start > 0 ||
285             actual_end > fs_info->sectorsize ||
286             data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
287             (!compressed_size &&
288             (actual_end & (fs_info->sectorsize - 1)) == 0) ||
289             end + 1 < isize ||
290             data_len > fs_info->max_inline) {
291                 return 1;
292         }
293 
294         path = btrfs_alloc_path();
295         if (!path)
296                 return -ENOMEM;
297 
298         trans = btrfs_join_transaction(root);
299         if (IS_ERR(trans)) {
300                 btrfs_free_path(path);
301                 return PTR_ERR(trans);
302         }
303         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
304 
305         if (compressed_size && compressed_pages)
306                 extent_item_size = btrfs_file_extent_calc_inline_size(
307                    compressed_size);
308         else
309                 extent_item_size = btrfs_file_extent_calc_inline_size(
310                     inline_len);
311 
312         ret = __btrfs_drop_extents(trans, root, inode, path,
313                                    start, aligned_end, NULL,
314                                    1, 1, extent_item_size, &extent_inserted);
315         if (ret) {
316                 btrfs_abort_transaction(trans, ret);
317                 goto out;
318         }
319 
320         if (isize > actual_end)
321                 inline_len = min_t(u64, isize, actual_end);
322         ret = insert_inline_extent(trans, path, extent_inserted,
323                                    root, inode, start,
324                                    inline_len, compressed_size,
325                                    compress_type, compressed_pages);
326         if (ret && ret != -ENOSPC) {
327                 btrfs_abort_transaction(trans, ret);
328                 goto out;
329         } else if (ret == -ENOSPC) {
330                 ret = 1;
331                 goto out;
332         }
333 
334         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
335         btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
336 out:
337         /*
338          * Don't forget to free the reserved space, as for inlined extent
339          * it won't count as data extent, free them directly here.
340          * And at reserve time, it's always aligned to page size, so
341          * just free one page here.
342          */
343         btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
344         btrfs_free_path(path);
345         btrfs_end_transaction(trans);
346         return ret;
347 }
348 
349 struct async_extent {
350         u64 start;
351         u64 ram_size;
352         u64 compressed_size;
353         struct page **pages;
354         unsigned long nr_pages;
355         int compress_type;
356         struct list_head list;
357 };
358 
359 struct async_cow {
360         struct inode *inode;
361         struct btrfs_root *root;
362         struct page *locked_page;
363         u64 start;
364         u64 end;
365         unsigned int write_flags;
366         struct list_head extents;
367         struct btrfs_work work;
368 };
369 
370 static noinline int add_async_extent(struct async_cow *cow,
371                                      u64 start, u64 ram_size,
372                                      u64 compressed_size,
373                                      struct page **pages,
374                                      unsigned long nr_pages,
375                                      int compress_type)
376 {
377         struct async_extent *async_extent;
378 
379         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
380         BUG_ON(!async_extent); /* -ENOMEM */
381         async_extent->start = start;
382         async_extent->ram_size = ram_size;
383         async_extent->compressed_size = compressed_size;
384         async_extent->pages = pages;
385         async_extent->nr_pages = nr_pages;
386         async_extent->compress_type = compress_type;
387         list_add_tail(&async_extent->list, &cow->extents);
388         return 0;
389 }
390 
391 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
392 {
393         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
394 
395         /* force compress */
396         if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
397                 return 1;
398         /* defrag ioctl */
399         if (BTRFS_I(inode)->defrag_compress)
400                 return 1;
401         /* bad compression ratios */
402         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
403                 return 0;
404         if (btrfs_test_opt(fs_info, COMPRESS) ||
405             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
406             BTRFS_I(inode)->prop_compress)
407                 return btrfs_compress_heuristic(inode, start, end);
408         return 0;
409 }
410 
411 static inline void inode_should_defrag(struct btrfs_inode *inode,
412                 u64 start, u64 end, u64 num_bytes, u64 small_write)
413 {
414         /* If this is a small write inside eof, kick off a defrag */
415         if (num_bytes < small_write &&
416             (start > 0 || end + 1 < inode->disk_i_size))
417                 btrfs_add_inode_defrag(NULL, inode);
418 }
419 
420 /*
421  * we create compressed extents in two phases.  The first
422  * phase compresses a range of pages that have already been
423  * locked (both pages and state bits are locked).
424  *
425  * This is done inside an ordered work queue, and the compression
426  * is spread across many cpus.  The actual IO submission is step
427  * two, and the ordered work queue takes care of making sure that
428  * happens in the same order things were put onto the queue by
429  * writepages and friends.
430  *
431  * If this code finds it can't get good compression, it puts an
432  * entry onto the work queue to write the uncompressed bytes.  This
433  * makes sure that both compressed inodes and uncompressed inodes
434  * are written in the same order that the flusher thread sent them
435  * down.
436  */
437 static noinline void compress_file_range(struct inode *inode,
438                                         struct page *locked_page,
439                                         u64 start, u64 end,
440                                         struct async_cow *async_cow,
441                                         int *num_added)
442 {
443         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
444         u64 blocksize = fs_info->sectorsize;
445         u64 actual_end;
446         u64 isize = i_size_read(inode);
447         int ret = 0;
448         struct page **pages = NULL;
449         unsigned long nr_pages;
450         unsigned long total_compressed = 0;
451         unsigned long total_in = 0;
452         int i;
453         int will_compress;
454         int compress_type = fs_info->compress_type;
455         int redirty = 0;
456 
457         inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
458                         SZ_16K);
459 
460         actual_end = min_t(u64, isize, end + 1);
461 again:
462         will_compress = 0;
463         nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
464         BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
465         nr_pages = min_t(unsigned long, nr_pages,
466                         BTRFS_MAX_COMPRESSED / PAGE_SIZE);
467 
468         /*
469          * we don't want to send crud past the end of i_size through
470          * compression, that's just a waste of CPU time.  So, if the
471          * end of the file is before the start of our current
472          * requested range of bytes, we bail out to the uncompressed
473          * cleanup code that can deal with all of this.
474          *
475          * It isn't really the fastest way to fix things, but this is a
476          * very uncommon corner.
477          */
478         if (actual_end <= start)
479                 goto cleanup_and_bail_uncompressed;
480 
481         total_compressed = actual_end - start;
482 
483         /*
484          * skip compression for a small file range(<=blocksize) that
485          * isn't an inline extent, since it doesn't save disk space at all.
486          */
487         if (total_compressed <= blocksize &&
488            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
489                 goto cleanup_and_bail_uncompressed;
490 
491         total_compressed = min_t(unsigned long, total_compressed,
492                         BTRFS_MAX_UNCOMPRESSED);
493         total_in = 0;
494         ret = 0;
495 
496         /*
497          * we do compression for mount -o compress and when the
498          * inode has not been flagged as nocompress.  This flag can
499          * change at any time if we discover bad compression ratios.
500          */
501         if (inode_need_compress(inode, start, end)) {
502                 WARN_ON(pages);
503                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
504                 if (!pages) {
505                         /* just bail out to the uncompressed code */
506                         goto cont;
507                 }
508 
509                 if (BTRFS_I(inode)->defrag_compress)
510                         compress_type = BTRFS_I(inode)->defrag_compress;
511                 else if (BTRFS_I(inode)->prop_compress)
512                         compress_type = BTRFS_I(inode)->prop_compress;
513 
514                 /*
515                  * we need to call clear_page_dirty_for_io on each
516                  * page in the range.  Otherwise applications with the file
517                  * mmap'd can wander in and change the page contents while
518                  * we are compressing them.
519                  *
520                  * If the compression fails for any reason, we set the pages
521                  * dirty again later on.
522                  *
523                  * Note that the remaining part is redirtied, the start pointer
524                  * has moved, the end is the original one.
525                  */
526                 if (!redirty) {
527                         extent_range_clear_dirty_for_io(inode, start, end);
528                         redirty = 1;
529                 }
530 
531                 /* Compression level is applied here and only here */
532                 ret = btrfs_compress_pages(
533                         compress_type | (fs_info->compress_level << 4),
534                                            inode->i_mapping, start,
535                                            pages,
536                                            &nr_pages,
537                                            &total_in,
538                                            &total_compressed);
539 
540                 if (!ret) {
541                         unsigned long offset = total_compressed &
542                                 (PAGE_SIZE - 1);
543                         struct page *page = pages[nr_pages - 1];
544                         char *kaddr;
545 
546                         /* zero the tail end of the last page, we might be
547                          * sending it down to disk
548                          */
549                         if (offset) {
550                                 kaddr = kmap_atomic(page);
551                                 memset(kaddr + offset, 0,
552                                        PAGE_SIZE - offset);
553                                 kunmap_atomic(kaddr);
554                         }
555                         will_compress = 1;
556                 }
557         }
558 cont:
559         if (start == 0) {
560                 /* lets try to make an inline extent */
561                 if (ret || total_in < actual_end) {
562                         /* we didn't compress the entire range, try
563                          * to make an uncompressed inline extent.
564                          */
565                         ret = cow_file_range_inline(inode, start, end, 0,
566                                                     BTRFS_COMPRESS_NONE, NULL);
567                 } else {
568                         /* try making a compressed inline extent */
569                         ret = cow_file_range_inline(inode, start, end,
570                                                     total_compressed,
571                                                     compress_type, pages);
572                 }
573                 if (ret <= 0) {
574                         unsigned long clear_flags = EXTENT_DELALLOC |
575                                 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
576                                 EXTENT_DO_ACCOUNTING;
577                         unsigned long page_error_op;
578 
579                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
580 
581                         /*
582                          * inline extent creation worked or returned error,
583                          * we don't need to create any more async work items.
584                          * Unlock and free up our temp pages.
585                          *
586                          * We use DO_ACCOUNTING here because we need the
587                          * delalloc_release_metadata to be done _after_ we drop
588                          * our outstanding extent for clearing delalloc for this
589                          * range.
590                          */
591                         extent_clear_unlock_delalloc(inode, start, end, end,
592                                                      NULL, clear_flags,
593                                                      PAGE_UNLOCK |
594                                                      PAGE_CLEAR_DIRTY |
595                                                      PAGE_SET_WRITEBACK |
596                                                      page_error_op |
597                                                      PAGE_END_WRITEBACK);
598                         goto free_pages_out;
599                 }
600         }
601 
602         if (will_compress) {
603                 /*
604                  * we aren't doing an inline extent round the compressed size
605                  * up to a block size boundary so the allocator does sane
606                  * things
607                  */
608                 total_compressed = ALIGN(total_compressed, blocksize);
609 
610                 /*
611                  * one last check to make sure the compression is really a
612                  * win, compare the page count read with the blocks on disk,
613                  * compression must free at least one sector size
614                  */
615                 total_in = ALIGN(total_in, PAGE_SIZE);
616                 if (total_compressed + blocksize <= total_in) {
617                         *num_added += 1;
618 
619                         /*
620                          * The async work queues will take care of doing actual
621                          * allocation on disk for these compressed pages, and
622                          * will submit them to the elevator.
623                          */
624                         add_async_extent(async_cow, start, total_in,
625                                         total_compressed, pages, nr_pages,
626                                         compress_type);
627 
628                         if (start + total_in < end) {
629                                 start += total_in;
630                                 pages = NULL;
631                                 cond_resched();
632                                 goto again;
633                         }
634                         return;
635                 }
636         }
637         if (pages) {
638                 /*
639                  * the compression code ran but failed to make things smaller,
640                  * free any pages it allocated and our page pointer array
641                  */
642                 for (i = 0; i < nr_pages; i++) {
643                         WARN_ON(pages[i]->mapping);
644                         put_page(pages[i]);
645                 }
646                 kfree(pages);
647                 pages = NULL;
648                 total_compressed = 0;
649                 nr_pages = 0;
650 
651                 /* flag the file so we don't compress in the future */
652                 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
653                     !(BTRFS_I(inode)->prop_compress)) {
654                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
655                 }
656         }
657 cleanup_and_bail_uncompressed:
658         /*
659          * No compression, but we still need to write the pages in the file
660          * we've been given so far.  redirty the locked page if it corresponds
661          * to our extent and set things up for the async work queue to run
662          * cow_file_range to do the normal delalloc dance.
663          */
664         if (page_offset(locked_page) >= start &&
665             page_offset(locked_page) <= end)
666                 __set_page_dirty_nobuffers(locked_page);
667                 /* unlocked later on in the async handlers */
668 
669         if (redirty)
670                 extent_range_redirty_for_io(inode, start, end);
671         add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
672                          BTRFS_COMPRESS_NONE);
673         *num_added += 1;
674 
675         return;
676 
677 free_pages_out:
678         for (i = 0; i < nr_pages; i++) {
679                 WARN_ON(pages[i]->mapping);
680                 put_page(pages[i]);
681         }
682         kfree(pages);
683 }
684 
685 static void free_async_extent_pages(struct async_extent *async_extent)
686 {
687         int i;
688 
689         if (!async_extent->pages)
690                 return;
691 
692         for (i = 0; i < async_extent->nr_pages; i++) {
693                 WARN_ON(async_extent->pages[i]->mapping);
694                 put_page(async_extent->pages[i]);
695         }
696         kfree(async_extent->pages);
697         async_extent->nr_pages = 0;
698         async_extent->pages = NULL;
699 }
700 
701 /*
702  * phase two of compressed writeback.  This is the ordered portion
703  * of the code, which only gets called in the order the work was
704  * queued.  We walk all the async extents created by compress_file_range
705  * and send them down to the disk.
706  */
707 static noinline void submit_compressed_extents(struct inode *inode,
708                                               struct async_cow *async_cow)
709 {
710         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
711         struct async_extent *async_extent;
712         u64 alloc_hint = 0;
713         struct btrfs_key ins;
714         struct extent_map *em;
715         struct btrfs_root *root = BTRFS_I(inode)->root;
716         struct extent_io_tree *io_tree;
717         int ret = 0;
718 
719 again:
720         while (!list_empty(&async_cow->extents)) {
721                 async_extent = list_entry(async_cow->extents.next,
722                                           struct async_extent, list);
723                 list_del(&async_extent->list);
724 
725                 io_tree = &BTRFS_I(inode)->io_tree;
726 
727 retry:
728                 /* did the compression code fall back to uncompressed IO? */
729                 if (!async_extent->pages) {
730                         int page_started = 0;
731                         unsigned long nr_written = 0;
732 
733                         lock_extent(io_tree, async_extent->start,
734                                          async_extent->start +
735                                          async_extent->ram_size - 1);
736 
737                         /* allocate blocks */
738                         ret = cow_file_range(inode, async_cow->locked_page,
739                                              async_extent->start,
740                                              async_extent->start +
741                                              async_extent->ram_size - 1,
742                                              async_extent->start +
743                                              async_extent->ram_size - 1,
744                                              &page_started, &nr_written, 0,
745                                              NULL);
746 
747                         /* JDM XXX */
748 
749                         /*
750                          * if page_started, cow_file_range inserted an
751                          * inline extent and took care of all the unlocking
752                          * and IO for us.  Otherwise, we need to submit
753                          * all those pages down to the drive.
754                          */
755                         if (!page_started && !ret)
756                                 extent_write_locked_range(inode,
757                                                   async_extent->start,
758                                                   async_extent->start +
759                                                   async_extent->ram_size - 1,
760                                                   WB_SYNC_ALL);
761                         else if (ret)
762                                 unlock_page(async_cow->locked_page);
763                         kfree(async_extent);
764                         cond_resched();
765                         continue;
766                 }
767 
768                 lock_extent(io_tree, async_extent->start,
769                             async_extent->start + async_extent->ram_size - 1);
770 
771                 ret = btrfs_reserve_extent(root, async_extent->ram_size,
772                                            async_extent->compressed_size,
773                                            async_extent->compressed_size,
774                                            0, alloc_hint, &ins, 1, 1);
775                 if (ret) {
776                         free_async_extent_pages(async_extent);
777 
778                         if (ret == -ENOSPC) {
779                                 unlock_extent(io_tree, async_extent->start,
780                                               async_extent->start +
781                                               async_extent->ram_size - 1);
782 
783                                 /*
784                                  * we need to redirty the pages if we decide to
785                                  * fallback to uncompressed IO, otherwise we
786                                  * will not submit these pages down to lower
787                                  * layers.
788                                  */
789                                 extent_range_redirty_for_io(inode,
790                                                 async_extent->start,
791                                                 async_extent->start +
792                                                 async_extent->ram_size - 1);
793 
794                                 goto retry;
795                         }
796                         goto out_free;
797                 }
798                 /*
799                  * here we're doing allocation and writeback of the
800                  * compressed pages
801                  */
802                 em = create_io_em(inode, async_extent->start,
803                                   async_extent->ram_size, /* len */
804                                   async_extent->start, /* orig_start */
805                                   ins.objectid, /* block_start */
806                                   ins.offset, /* block_len */
807                                   ins.offset, /* orig_block_len */
808                                   async_extent->ram_size, /* ram_bytes */
809                                   async_extent->compress_type,
810                                   BTRFS_ORDERED_COMPRESSED);
811                 if (IS_ERR(em))
812                         /* ret value is not necessary due to void function */
813                         goto out_free_reserve;
814                 free_extent_map(em);
815 
816                 ret = btrfs_add_ordered_extent_compress(inode,
817                                                 async_extent->start,
818                                                 ins.objectid,
819                                                 async_extent->ram_size,
820                                                 ins.offset,
821                                                 BTRFS_ORDERED_COMPRESSED,
822                                                 async_extent->compress_type);
823                 if (ret) {
824                         btrfs_drop_extent_cache(BTRFS_I(inode),
825                                                 async_extent->start,
826                                                 async_extent->start +
827                                                 async_extent->ram_size - 1, 0);
828                         goto out_free_reserve;
829                 }
830                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
831 
832                 /*
833                  * clear dirty, set writeback and unlock the pages.
834                  */
835                 extent_clear_unlock_delalloc(inode, async_extent->start,
836                                 async_extent->start +
837                                 async_extent->ram_size - 1,
838                                 async_extent->start +
839                                 async_extent->ram_size - 1,
840                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
841                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
842                                 PAGE_SET_WRITEBACK);
843                 if (btrfs_submit_compressed_write(inode,
844                                     async_extent->start,
845                                     async_extent->ram_size,
846                                     ins.objectid,
847                                     ins.offset, async_extent->pages,
848                                     async_extent->nr_pages,
849                                     async_cow->write_flags)) {
850                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
851                         struct page *p = async_extent->pages[0];
852                         const u64 start = async_extent->start;
853                         const u64 end = start + async_extent->ram_size - 1;
854 
855                         p->mapping = inode->i_mapping;
856                         tree->ops->writepage_end_io_hook(p, start, end,
857                                                          NULL, 0);
858                         p->mapping = NULL;
859                         extent_clear_unlock_delalloc(inode, start, end, end,
860                                                      NULL, 0,
861                                                      PAGE_END_WRITEBACK |
862                                                      PAGE_SET_ERROR);
863                         free_async_extent_pages(async_extent);
864                 }
865                 alloc_hint = ins.objectid + ins.offset;
866                 kfree(async_extent);
867                 cond_resched();
868         }
869         return;
870 out_free_reserve:
871         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
872         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
873 out_free:
874         extent_clear_unlock_delalloc(inode, async_extent->start,
875                                      async_extent->start +
876                                      async_extent->ram_size - 1,
877                                      async_extent->start +
878                                      async_extent->ram_size - 1,
879                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
880                                      EXTENT_DELALLOC_NEW |
881                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
882                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
883                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
884                                      PAGE_SET_ERROR);
885         free_async_extent_pages(async_extent);
886         kfree(async_extent);
887         goto again;
888 }
889 
890 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
891                                       u64 num_bytes)
892 {
893         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
894         struct extent_map *em;
895         u64 alloc_hint = 0;
896 
897         read_lock(&em_tree->lock);
898         em = search_extent_mapping(em_tree, start, num_bytes);
899         if (em) {
900                 /*
901                  * if block start isn't an actual block number then find the
902                  * first block in this inode and use that as a hint.  If that
903                  * block is also bogus then just don't worry about it.
904                  */
905                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
906                         free_extent_map(em);
907                         em = search_extent_mapping(em_tree, 0, 0);
908                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
909                                 alloc_hint = em->block_start;
910                         if (em)
911                                 free_extent_map(em);
912                 } else {
913                         alloc_hint = em->block_start;
914                         free_extent_map(em);
915                 }
916         }
917         read_unlock(&em_tree->lock);
918 
919         return alloc_hint;
920 }
921 
922 /*
923  * when extent_io.c finds a delayed allocation range in the file,
924  * the call backs end up in this code.  The basic idea is to
925  * allocate extents on disk for the range, and create ordered data structs
926  * in ram to track those extents.
927  *
928  * locked_page is the page that writepage had locked already.  We use
929  * it to make sure we don't do extra locks or unlocks.
930  *
931  * *page_started is set to one if we unlock locked_page and do everything
932  * required to start IO on it.  It may be clean and already done with
933  * IO when we return.
934  */
935 static noinline int cow_file_range(struct inode *inode,
936                                    struct page *locked_page,
937                                    u64 start, u64 end, u64 delalloc_end,
938                                    int *page_started, unsigned long *nr_written,
939                                    int unlock, struct btrfs_dedupe_hash *hash)
940 {
941         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
942         struct btrfs_root *root = BTRFS_I(inode)->root;
943         u64 alloc_hint = 0;
944         u64 num_bytes;
945         unsigned long ram_size;
946         u64 cur_alloc_size = 0;
947         u64 blocksize = fs_info->sectorsize;
948         struct btrfs_key ins;
949         struct extent_map *em;
950         unsigned clear_bits;
951         unsigned long page_ops;
952         bool extent_reserved = false;
953         int ret = 0;
954 
955         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
956                 WARN_ON_ONCE(1);
957                 ret = -EINVAL;
958                 goto out_unlock;
959         }
960 
961         num_bytes = ALIGN(end - start + 1, blocksize);
962         num_bytes = max(blocksize,  num_bytes);
963         ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
964 
965         inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
966 
967         if (start == 0) {
968                 /* lets try to make an inline extent */
969                 ret = cow_file_range_inline(inode, start, end, 0,
970                                             BTRFS_COMPRESS_NONE, NULL);
971                 if (ret == 0) {
972                         /*
973                          * We use DO_ACCOUNTING here because we need the
974                          * delalloc_release_metadata to be run _after_ we drop
975                          * our outstanding extent for clearing delalloc for this
976                          * range.
977                          */
978                         extent_clear_unlock_delalloc(inode, start, end,
979                                      delalloc_end, NULL,
980                                      EXTENT_LOCKED | EXTENT_DELALLOC |
981                                      EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
982                                      EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
983                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
984                                      PAGE_END_WRITEBACK);
985                         *nr_written = *nr_written +
986                              (end - start + PAGE_SIZE) / PAGE_SIZE;
987                         *page_started = 1;
988                         goto out;
989                 } else if (ret < 0) {
990                         goto out_unlock;
991                 }
992         }
993 
994         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
995         btrfs_drop_extent_cache(BTRFS_I(inode), start,
996                         start + num_bytes - 1, 0);
997 
998         while (num_bytes > 0) {
999                 cur_alloc_size = num_bytes;
1000                 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1001                                            fs_info->sectorsize, 0, alloc_hint,
1002                                            &ins, 1, 1);
1003                 if (ret < 0)
1004                         goto out_unlock;
1005                 cur_alloc_size = ins.offset;
1006                 extent_reserved = true;
1007 
1008                 ram_size = ins.offset;
1009                 em = create_io_em(inode, start, ins.offset, /* len */
1010                                   start, /* orig_start */
1011                                   ins.objectid, /* block_start */
1012                                   ins.offset, /* block_len */
1013                                   ins.offset, /* orig_block_len */
1014                                   ram_size, /* ram_bytes */
1015                                   BTRFS_COMPRESS_NONE, /* compress_type */
1016                                   BTRFS_ORDERED_REGULAR /* type */);
1017                 if (IS_ERR(em)) {
1018                         ret = PTR_ERR(em);
1019                         goto out_reserve;
1020                 }
1021                 free_extent_map(em);
1022 
1023                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1024                                                ram_size, cur_alloc_size, 0);
1025                 if (ret)
1026                         goto out_drop_extent_cache;
1027 
1028                 if (root->root_key.objectid ==
1029                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1030                         ret = btrfs_reloc_clone_csums(inode, start,
1031                                                       cur_alloc_size);
1032                         /*
1033                          * Only drop cache here, and process as normal.
1034                          *
1035                          * We must not allow extent_clear_unlock_delalloc()
1036                          * at out_unlock label to free meta of this ordered
1037                          * extent, as its meta should be freed by
1038                          * btrfs_finish_ordered_io().
1039                          *
1040                          * So we must continue until @start is increased to
1041                          * skip current ordered extent.
1042                          */
1043                         if (ret)
1044                                 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1045                                                 start + ram_size - 1, 0);
1046                 }
1047 
1048                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1049 
1050                 /* we're not doing compressed IO, don't unlock the first
1051                  * page (which the caller expects to stay locked), don't
1052                  * clear any dirty bits and don't set any writeback bits
1053                  *
1054                  * Do set the Private2 bit so we know this page was properly
1055                  * setup for writepage
1056                  */
1057                 page_ops = unlock ? PAGE_UNLOCK : 0;
1058                 page_ops |= PAGE_SET_PRIVATE2;
1059 
1060                 extent_clear_unlock_delalloc(inode, start,
1061                                              start + ram_size - 1,
1062                                              delalloc_end, locked_page,
1063                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1064                                              page_ops);
1065                 if (num_bytes < cur_alloc_size)
1066                         num_bytes = 0;
1067                 else
1068                         num_bytes -= cur_alloc_size;
1069                 alloc_hint = ins.objectid + ins.offset;
1070                 start += cur_alloc_size;
1071                 extent_reserved = false;
1072 
1073                 /*
1074                  * btrfs_reloc_clone_csums() error, since start is increased
1075                  * extent_clear_unlock_delalloc() at out_unlock label won't
1076                  * free metadata of current ordered extent, we're OK to exit.
1077                  */
1078                 if (ret)
1079                         goto out_unlock;
1080         }
1081 out:
1082         return ret;
1083 
1084 out_drop_extent_cache:
1085         btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1086 out_reserve:
1087         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1088         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1089 out_unlock:
1090         clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1091                 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1092         page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1093                 PAGE_END_WRITEBACK;
1094         /*
1095          * If we reserved an extent for our delalloc range (or a subrange) and
1096          * failed to create the respective ordered extent, then it means that
1097          * when we reserved the extent we decremented the extent's size from
1098          * the data space_info's bytes_may_use counter and incremented the
1099          * space_info's bytes_reserved counter by the same amount. We must make
1100          * sure extent_clear_unlock_delalloc() does not try to decrement again
1101          * the data space_info's bytes_may_use counter, therefore we do not pass
1102          * it the flag EXTENT_CLEAR_DATA_RESV.
1103          */
1104         if (extent_reserved) {
1105                 extent_clear_unlock_delalloc(inode, start,
1106                                              start + cur_alloc_size,
1107                                              start + cur_alloc_size,
1108                                              locked_page,
1109                                              clear_bits,
1110                                              page_ops);
1111                 start += cur_alloc_size;
1112                 if (start >= end)
1113                         goto out;
1114         }
1115         extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1116                                      locked_page,
1117                                      clear_bits | EXTENT_CLEAR_DATA_RESV,
1118                                      page_ops);
1119         goto out;
1120 }
1121 
1122 /*
1123  * work queue call back to started compression on a file and pages
1124  */
1125 static noinline void async_cow_start(struct btrfs_work *work)
1126 {
1127         struct async_cow *async_cow;
1128         int num_added = 0;
1129         async_cow = container_of(work, struct async_cow, work);
1130 
1131         compress_file_range(async_cow->inode, async_cow->locked_page,
1132                             async_cow->start, async_cow->end, async_cow,
1133                             &num_added);
1134         if (num_added == 0) {
1135                 btrfs_add_delayed_iput(async_cow->inode);
1136                 async_cow->inode = NULL;
1137         }
1138 }
1139 
1140 /*
1141  * work queue call back to submit previously compressed pages
1142  */
1143 static noinline void async_cow_submit(struct btrfs_work *work)
1144 {
1145         struct btrfs_fs_info *fs_info;
1146         struct async_cow *async_cow;
1147         struct btrfs_root *root;
1148         unsigned long nr_pages;
1149 
1150         async_cow = container_of(work, struct async_cow, work);
1151 
1152         root = async_cow->root;
1153         fs_info = root->fs_info;
1154         nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1155                 PAGE_SHIFT;
1156 
1157         /* atomic_sub_return implies a barrier */
1158         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1159             5 * SZ_1M)
1160                 cond_wake_up_nomb(&fs_info->async_submit_wait);
1161 
1162         if (async_cow->inode)
1163                 submit_compressed_extents(async_cow->inode, async_cow);
1164 }
1165 
1166 static noinline void async_cow_free(struct btrfs_work *work)
1167 {
1168         struct async_cow *async_cow;
1169         async_cow = container_of(work, struct async_cow, work);
1170         if (async_cow->inode)
1171                 btrfs_add_delayed_iput(async_cow->inode);
1172         kfree(async_cow);
1173 }
1174 
1175 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1176                                 u64 start, u64 end, int *page_started,
1177                                 unsigned long *nr_written,
1178                                 unsigned int write_flags)
1179 {
1180         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1181         struct async_cow *async_cow;
1182         struct btrfs_root *root = BTRFS_I(inode)->root;
1183         unsigned long nr_pages;
1184         u64 cur_end;
1185 
1186         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1187                          1, 0, NULL);
1188         while (start < end) {
1189                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1190                 BUG_ON(!async_cow); /* -ENOMEM */
1191                 async_cow->inode = igrab(inode);
1192                 async_cow->root = root;
1193                 async_cow->locked_page = locked_page;
1194                 async_cow->start = start;
1195                 async_cow->write_flags = write_flags;
1196 
1197                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1198                     !btrfs_test_opt(fs_info, FORCE_COMPRESS))
1199                         cur_end = end;
1200                 else
1201                         cur_end = min(end, start + SZ_512K - 1);
1202 
1203                 async_cow->end = cur_end;
1204                 INIT_LIST_HEAD(&async_cow->extents);
1205 
1206                 btrfs_init_work(&async_cow->work,
1207                                 btrfs_delalloc_helper,
1208                                 async_cow_start, async_cow_submit,
1209                                 async_cow_free);
1210 
1211                 nr_pages = (cur_end - start + PAGE_SIZE) >>
1212                         PAGE_SHIFT;
1213                 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1214 
1215                 btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
1216 
1217                 *nr_written += nr_pages;
1218                 start = cur_end + 1;
1219         }
1220         *page_started = 1;
1221         return 0;
1222 }
1223 
1224 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1225                                         u64 bytenr, u64 num_bytes)
1226 {
1227         int ret;
1228         struct btrfs_ordered_sum *sums;
1229         LIST_HEAD(list);
1230 
1231         ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1232                                        bytenr + num_bytes - 1, &list, 0);
1233         if (ret == 0 && list_empty(&list))
1234                 return 0;
1235 
1236         while (!list_empty(&list)) {
1237                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1238                 list_del(&sums->list);
1239                 kfree(sums);
1240         }
1241         if (ret < 0)
1242                 return ret;
1243         return 1;
1244 }
1245 
1246 /*
1247  * when nowcow writeback call back.  This checks for snapshots or COW copies
1248  * of the extents that exist in the file, and COWs the file as required.
1249  *
1250  * If no cow copies or snapshots exist, we write directly to the existing
1251  * blocks on disk
1252  */
1253 static noinline int run_delalloc_nocow(struct inode *inode,
1254                                        struct page *locked_page,
1255                               u64 start, u64 end, int *page_started, int force,
1256                               unsigned long *nr_written)
1257 {
1258         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1259         struct btrfs_root *root = BTRFS_I(inode)->root;
1260         struct extent_buffer *leaf;
1261         struct btrfs_path *path;
1262         struct btrfs_file_extent_item *fi;
1263         struct btrfs_key found_key;
1264         struct extent_map *em;
1265         u64 cow_start;
1266         u64 cur_offset;
1267         u64 extent_end;
1268         u64 extent_offset;
1269         u64 disk_bytenr;
1270         u64 num_bytes;
1271         u64 disk_num_bytes;
1272         u64 ram_bytes;
1273         int extent_type;
1274         int ret;
1275         int type;
1276         int nocow;
1277         int check_prev = 1;
1278         bool nolock;
1279         u64 ino = btrfs_ino(BTRFS_I(inode));
1280 
1281         path = btrfs_alloc_path();
1282         if (!path) {
1283                 extent_clear_unlock_delalloc(inode, start, end, end,
1284                                              locked_page,
1285                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1286                                              EXTENT_DO_ACCOUNTING |
1287                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1288                                              PAGE_CLEAR_DIRTY |
1289                                              PAGE_SET_WRITEBACK |
1290                                              PAGE_END_WRITEBACK);
1291                 return -ENOMEM;
1292         }
1293 
1294         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
1295 
1296         cow_start = (u64)-1;
1297         cur_offset = start;
1298         while (1) {
1299                 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1300                                                cur_offset, 0);
1301                 if (ret < 0)
1302                         goto error;
1303                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1304                         leaf = path->nodes[0];
1305                         btrfs_item_key_to_cpu(leaf, &found_key,
1306                                               path->slots[0] - 1);
1307                         if (found_key.objectid == ino &&
1308                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1309                                 path->slots[0]--;
1310                 }
1311                 check_prev = 0;
1312 next_slot:
1313                 leaf = path->nodes[0];
1314                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1315                         ret = btrfs_next_leaf(root, path);
1316                         if (ret < 0) {
1317                                 if (cow_start != (u64)-1)
1318                                         cur_offset = cow_start;
1319                                 goto error;
1320                         }
1321                         if (ret > 0)
1322                                 break;
1323                         leaf = path->nodes[0];
1324                 }
1325 
1326                 nocow = 0;
1327                 disk_bytenr = 0;
1328                 num_bytes = 0;
1329                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1330 
1331                 if (found_key.objectid > ino)
1332                         break;
1333                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1334                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1335                         path->slots[0]++;
1336                         goto next_slot;
1337                 }
1338                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1339                     found_key.offset > end)
1340                         break;
1341 
1342                 if (found_key.offset > cur_offset) {
1343                         extent_end = found_key.offset;
1344                         extent_type = 0;
1345                         goto out_check;
1346                 }
1347 
1348                 fi = btrfs_item_ptr(leaf, path->slots[0],
1349                                     struct btrfs_file_extent_item);
1350                 extent_type = btrfs_file_extent_type(leaf, fi);
1351 
1352                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1353                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1354                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1355                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1356                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1357                         extent_end = found_key.offset +
1358                                 btrfs_file_extent_num_bytes(leaf, fi);
1359                         disk_num_bytes =
1360                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1361                         if (extent_end <= start) {
1362                                 path->slots[0]++;
1363                                 goto next_slot;
1364                         }
1365                         if (disk_bytenr == 0)
1366                                 goto out_check;
1367                         if (btrfs_file_extent_compression(leaf, fi) ||
1368                             btrfs_file_extent_encryption(leaf, fi) ||
1369                             btrfs_file_extent_other_encoding(leaf, fi))
1370                                 goto out_check;
1371                         /*
1372                          * Do the same check as in btrfs_cross_ref_exist but
1373                          * without the unnecessary search.
1374                          */
1375                         if (btrfs_file_extent_generation(leaf, fi) <=
1376                             btrfs_root_last_snapshot(&root->root_item))
1377                                 goto out_check;
1378                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1379                                 goto out_check;
1380                         if (btrfs_extent_readonly(fs_info, disk_bytenr))
1381                                 goto out_check;
1382                         ret = btrfs_cross_ref_exist(root, ino,
1383                                                     found_key.offset -
1384                                                     extent_offset, disk_bytenr);
1385                         if (ret) {
1386                                 /*
1387                                  * ret could be -EIO if the above fails to read
1388                                  * metadata.
1389                                  */
1390                                 if (ret < 0) {
1391                                         if (cow_start != (u64)-1)
1392                                                 cur_offset = cow_start;
1393                                         goto error;
1394                                 }
1395 
1396                                 WARN_ON_ONCE(nolock);
1397                                 goto out_check;
1398                         }
1399                         disk_bytenr += extent_offset;
1400                         disk_bytenr += cur_offset - found_key.offset;
1401                         num_bytes = min(end + 1, extent_end) - cur_offset;
1402                         /*
1403                          * if there are pending snapshots for this root,
1404                          * we fall into common COW way.
1405                          */
1406                         if (!nolock && atomic_read(&root->snapshot_force_cow))
1407                                 goto out_check;
1408                         /*
1409                          * force cow if csum exists in the range.
1410                          * this ensure that csum for a given extent are
1411                          * either valid or do not exist.
1412                          */
1413                         ret = csum_exist_in_range(fs_info, disk_bytenr,
1414                                                   num_bytes);
1415                         if (ret) {
1416                                 /*
1417                                  * ret could be -EIO if the above fails to read
1418                                  * metadata.
1419                                  */
1420                                 if (ret < 0) {
1421                                         if (cow_start != (u64)-1)
1422                                                 cur_offset = cow_start;
1423                                         goto error;
1424                                 }
1425                                 WARN_ON_ONCE(nolock);
1426                                 goto out_check;
1427                         }
1428                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1429                                 goto out_check;
1430                         nocow = 1;
1431                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1432                         extent_end = found_key.offset +
1433                                 btrfs_file_extent_ram_bytes(leaf, fi);
1434                         extent_end = ALIGN(extent_end,
1435                                            fs_info->sectorsize);
1436                 } else {
1437                         BUG_ON(1);
1438                 }
1439 out_check:
1440                 if (extent_end <= start) {
1441                         path->slots[0]++;
1442                         if (nocow)
1443                                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1444                         goto next_slot;
1445                 }
1446                 if (!nocow) {
1447                         if (cow_start == (u64)-1)
1448                                 cow_start = cur_offset;
1449                         cur_offset = extent_end;
1450                         if (cur_offset > end)
1451                                 break;
1452                         path->slots[0]++;
1453                         goto next_slot;
1454                 }
1455 
1456                 btrfs_release_path(path);
1457                 if (cow_start != (u64)-1) {
1458                         ret = cow_file_range(inode, locked_page,
1459                                              cow_start, found_key.offset - 1,
1460                                              end, page_started, nr_written, 1,
1461                                              NULL);
1462                         if (ret) {
1463                                 if (nocow)
1464                                         btrfs_dec_nocow_writers(fs_info,
1465                                                                 disk_bytenr);
1466                                 goto error;
1467                         }
1468                         cow_start = (u64)-1;
1469                 }
1470 
1471                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1472                         u64 orig_start = found_key.offset - extent_offset;
1473 
1474                         em = create_io_em(inode, cur_offset, num_bytes,
1475                                           orig_start,
1476                                           disk_bytenr, /* block_start */
1477                                           num_bytes, /* block_len */
1478                                           disk_num_bytes, /* orig_block_len */
1479                                           ram_bytes, BTRFS_COMPRESS_NONE,
1480                                           BTRFS_ORDERED_PREALLOC);
1481                         if (IS_ERR(em)) {
1482                                 if (nocow)
1483                                         btrfs_dec_nocow_writers(fs_info,
1484                                                                 disk_bytenr);
1485                                 ret = PTR_ERR(em);
1486                                 goto error;
1487                         }
1488                         free_extent_map(em);
1489                 }
1490 
1491                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1492                         type = BTRFS_ORDERED_PREALLOC;
1493                 } else {
1494                         type = BTRFS_ORDERED_NOCOW;
1495                 }
1496 
1497                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1498                                                num_bytes, num_bytes, type);
1499                 if (nocow)
1500                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1501                 BUG_ON(ret); /* -ENOMEM */
1502 
1503                 if (root->root_key.objectid ==
1504                     BTRFS_DATA_RELOC_TREE_OBJECTID)
1505                         /*
1506                          * Error handled later, as we must prevent
1507                          * extent_clear_unlock_delalloc() in error handler
1508                          * from freeing metadata of created ordered extent.
1509                          */
1510                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1511                                                       num_bytes);
1512 
1513                 extent_clear_unlock_delalloc(inode, cur_offset,
1514                                              cur_offset + num_bytes - 1, end,
1515                                              locked_page, EXTENT_LOCKED |
1516                                              EXTENT_DELALLOC |
1517                                              EXTENT_CLEAR_DATA_RESV,
1518                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1519 
1520                 cur_offset = extent_end;
1521 
1522                 /*
1523                  * btrfs_reloc_clone_csums() error, now we're OK to call error
1524                  * handler, as metadata for created ordered extent will only
1525                  * be freed by btrfs_finish_ordered_io().
1526                  */
1527                 if (ret)
1528                         goto error;
1529                 if (cur_offset > end)
1530                         break;
1531         }
1532         btrfs_release_path(path);
1533 
1534         if (cur_offset <= end && cow_start == (u64)-1) {
1535                 cow_start = cur_offset;
1536                 cur_offset = end;
1537         }
1538 
1539         if (cow_start != (u64)-1) {
1540                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1541                                      page_started, nr_written, 1, NULL);
1542                 if (ret)
1543                         goto error;
1544         }
1545 
1546 error:
1547         if (ret && cur_offset < end)
1548                 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1549                                              locked_page, EXTENT_LOCKED |
1550                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1551                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1552                                              PAGE_CLEAR_DIRTY |
1553                                              PAGE_SET_WRITEBACK |
1554                                              PAGE_END_WRITEBACK);
1555         btrfs_free_path(path);
1556         return ret;
1557 }
1558 
1559 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1560 {
1561 
1562         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1563             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1564                 return 0;
1565 
1566         /*
1567          * @defrag_bytes is a hint value, no spinlock held here,
1568          * if is not zero, it means the file is defragging.
1569          * Force cow if given extent needs to be defragged.
1570          */
1571         if (BTRFS_I(inode)->defrag_bytes &&
1572             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1573                            EXTENT_DEFRAG, 0, NULL))
1574                 return 1;
1575 
1576         return 0;
1577 }
1578 
1579 /*
1580  * extent_io.c call back to do delayed allocation processing
1581  */
1582 static int run_delalloc_range(void *private_data, struct page *locked_page,
1583                               u64 start, u64 end, int *page_started,
1584                               unsigned long *nr_written,
1585                               struct writeback_control *wbc)
1586 {
1587         struct inode *inode = private_data;
1588         int ret;
1589         int force_cow = need_force_cow(inode, start, end);
1590         unsigned int write_flags = wbc_to_write_flags(wbc);
1591 
1592         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1593                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1594                                          page_started, 1, nr_written);
1595         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1596                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1597                                          page_started, 0, nr_written);
1598         } else if (!inode_need_compress(inode, start, end)) {
1599                 ret = cow_file_range(inode, locked_page, start, end, end,
1600                                       page_started, nr_written, 1, NULL);
1601         } else {
1602                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1603                         &BTRFS_I(inode)->runtime_flags);
1604                 ret = cow_file_range_async(inode, locked_page, start, end,
1605                                            page_started, nr_written,
1606                                            write_flags);
1607         }
1608         if (ret)
1609                 btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
1610         return ret;
1611 }
1612 
1613 static void btrfs_split_extent_hook(void *private_data,
1614                                     struct extent_state *orig, u64 split)
1615 {
1616         struct inode *inode = private_data;
1617         u64 size;
1618 
1619         /* not delalloc, ignore it */
1620         if (!(orig->state & EXTENT_DELALLOC))
1621                 return;
1622 
1623         size = orig->end - orig->start + 1;
1624         if (size > BTRFS_MAX_EXTENT_SIZE) {
1625                 u32 num_extents;
1626                 u64 new_size;
1627 
1628                 /*
1629                  * See the explanation in btrfs_merge_extent_hook, the same
1630                  * applies here, just in reverse.
1631                  */
1632                 new_size = orig->end - split + 1;
1633                 num_extents = count_max_extents(new_size);
1634                 new_size = split - orig->start;
1635                 num_extents += count_max_extents(new_size);
1636                 if (count_max_extents(size) >= num_extents)
1637                         return;
1638         }
1639 
1640         spin_lock(&BTRFS_I(inode)->lock);
1641         btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1642         spin_unlock(&BTRFS_I(inode)->lock);
1643 }
1644 
1645 /*
1646  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1647  * extents so we can keep track of new extents that are just merged onto old
1648  * extents, such as when we are doing sequential writes, so we can properly
1649  * account for the metadata space we'll need.
1650  */
1651 static void btrfs_merge_extent_hook(void *private_data,
1652                                     struct extent_state *new,
1653                                     struct extent_state *other)
1654 {
1655         struct inode *inode = private_data;
1656         u64 new_size, old_size;
1657         u32 num_extents;
1658 
1659         /* not delalloc, ignore it */
1660         if (!(other->state & EXTENT_DELALLOC))
1661                 return;
1662 
1663         if (new->start > other->start)
1664                 new_size = new->end - other->start + 1;
1665         else
1666                 new_size = other->end - new->start + 1;
1667 
1668         /* we're not bigger than the max, unreserve the space and go */
1669         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1670                 spin_lock(&BTRFS_I(inode)->lock);
1671                 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1672                 spin_unlock(&BTRFS_I(inode)->lock);
1673                 return;
1674         }
1675 
1676         /*
1677          * We have to add up either side to figure out how many extents were
1678          * accounted for before we merged into one big extent.  If the number of
1679          * extents we accounted for is <= the amount we need for the new range
1680          * then we can return, otherwise drop.  Think of it like this
1681          *
1682          * [ 4k][MAX_SIZE]
1683          *
1684          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1685          * need 2 outstanding extents, on one side we have 1 and the other side
1686          * we have 1 so they are == and we can return.  But in this case
1687          *
1688          * [MAX_SIZE+4k][MAX_SIZE+4k]
1689          *
1690          * Each range on their own accounts for 2 extents, but merged together
1691          * they are only 3 extents worth of accounting, so we need to drop in
1692          * this case.
1693          */
1694         old_size = other->end - other->start + 1;
1695         num_extents = count_max_extents(old_size);
1696         old_size = new->end - new->start + 1;
1697         num_extents += count_max_extents(old_size);
1698         if (count_max_extents(new_size) >= num_extents)
1699                 return;
1700 
1701         spin_lock(&BTRFS_I(inode)->lock);
1702         btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1703         spin_unlock(&BTRFS_I(inode)->lock);
1704 }
1705 
1706 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1707                                       struct inode *inode)
1708 {
1709         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1710 
1711         spin_lock(&root->delalloc_lock);
1712         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1713                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1714                               &root->delalloc_inodes);
1715                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1716                         &BTRFS_I(inode)->runtime_flags);
1717                 root->nr_delalloc_inodes++;
1718                 if (root->nr_delalloc_inodes == 1) {
1719                         spin_lock(&fs_info->delalloc_root_lock);
1720                         BUG_ON(!list_empty(&root->delalloc_root));
1721                         list_add_tail(&root->delalloc_root,
1722                                       &fs_info->delalloc_roots);
1723                         spin_unlock(&fs_info->delalloc_root_lock);
1724                 }
1725         }
1726         spin_unlock(&root->delalloc_lock);
1727 }
1728 
1729 
1730 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1731                                 struct btrfs_inode *inode)
1732 {
1733         struct btrfs_fs_info *fs_info = root->fs_info;
1734 
1735         if (!list_empty(&inode->delalloc_inodes)) {
1736                 list_del_init(&inode->delalloc_inodes);
1737                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1738                           &inode->runtime_flags);
1739                 root->nr_delalloc_inodes--;
1740                 if (!root->nr_delalloc_inodes) {
1741                         ASSERT(list_empty(&root->delalloc_inodes));
1742                         spin_lock(&fs_info->delalloc_root_lock);
1743                         BUG_ON(list_empty(&root->delalloc_root));
1744                         list_del_init(&root->delalloc_root);
1745                         spin_unlock(&fs_info->delalloc_root_lock);
1746                 }
1747         }
1748 }
1749 
1750 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1751                                      struct btrfs_inode *inode)
1752 {
1753         spin_lock(&root->delalloc_lock);
1754         __btrfs_del_delalloc_inode(root, inode);
1755         spin_unlock(&root->delalloc_lock);
1756 }
1757 
1758 /*
1759  * extent_io.c set_bit_hook, used to track delayed allocation
1760  * bytes in this file, and to maintain the list of inodes that
1761  * have pending delalloc work to be done.
1762  */
1763 static void btrfs_set_bit_hook(void *private_data,
1764                                struct extent_state *state, unsigned *bits)
1765 {
1766         struct inode *inode = private_data;
1767 
1768         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1769 
1770         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1771                 WARN_ON(1);
1772         /*
1773          * set_bit and clear bit hooks normally require _irqsave/restore
1774          * but in this case, we are only testing for the DELALLOC
1775          * bit, which is only set or cleared with irqs on
1776          */
1777         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1778                 struct btrfs_root *root = BTRFS_I(inode)->root;
1779                 u64 len = state->end + 1 - state->start;
1780                 u32 num_extents = count_max_extents(len);
1781                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1782 
1783                 spin_lock(&BTRFS_I(inode)->lock);
1784                 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1785                 spin_unlock(&BTRFS_I(inode)->lock);
1786 
1787                 /* For sanity tests */
1788                 if (btrfs_is_testing(fs_info))
1789                         return;
1790 
1791                 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1792                                          fs_info->delalloc_batch);
1793                 spin_lock(&BTRFS_I(inode)->lock);
1794                 BTRFS_I(inode)->delalloc_bytes += len;
1795                 if (*bits & EXTENT_DEFRAG)
1796                         BTRFS_I(inode)->defrag_bytes += len;
1797                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1798                                          &BTRFS_I(inode)->runtime_flags))
1799                         btrfs_add_delalloc_inodes(root, inode);
1800                 spin_unlock(&BTRFS_I(inode)->lock);
1801         }
1802 
1803         if (!(state->state & EXTENT_DELALLOC_NEW) &&
1804             (*bits & EXTENT_DELALLOC_NEW)) {
1805                 spin_lock(&BTRFS_I(inode)->lock);
1806                 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1807                         state->start;
1808                 spin_unlock(&BTRFS_I(inode)->lock);
1809         }
1810 }
1811 
1812 /*
1813  * extent_io.c clear_bit_hook, see set_bit_hook for why
1814  */
1815 static void btrfs_clear_bit_hook(void *private_data,
1816                                  struct extent_state *state,
1817                                  unsigned *bits)
1818 {
1819         struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
1820         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1821         u64 len = state->end + 1 - state->start;
1822         u32 num_extents = count_max_extents(len);
1823 
1824         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1825                 spin_lock(&inode->lock);
1826                 inode->defrag_bytes -= len;
1827                 spin_unlock(&inode->lock);
1828         }
1829 
1830         /*
1831          * set_bit and clear bit hooks normally require _irqsave/restore
1832          * but in this case, we are only testing for the DELALLOC
1833          * bit, which is only set or cleared with irqs on
1834          */
1835         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1836                 struct btrfs_root *root = inode->root;
1837                 bool do_list = !btrfs_is_free_space_inode(inode);
1838 
1839                 spin_lock(&inode->lock);
1840                 btrfs_mod_outstanding_extents(inode, -num_extents);
1841                 spin_unlock(&inode->lock);
1842 
1843                 /*
1844                  * We don't reserve metadata space for space cache inodes so we
1845                  * don't need to call dellalloc_release_metadata if there is an
1846                  * error.
1847                  */
1848                 if (*bits & EXTENT_CLEAR_META_RESV &&
1849                     root != fs_info->tree_root)
1850                         btrfs_delalloc_release_metadata(inode, len, false);
1851 
1852                 /* For sanity tests. */
1853                 if (btrfs_is_testing(fs_info))
1854                         return;
1855 
1856                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1857                     do_list && !(state->state & EXTENT_NORESERVE) &&
1858                     (*bits & EXTENT_CLEAR_DATA_RESV))
1859                         btrfs_free_reserved_data_space_noquota(
1860                                         &inode->vfs_inode,
1861                                         state->start, len);
1862 
1863                 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1864                                          fs_info->delalloc_batch);
1865                 spin_lock(&inode->lock);
1866                 inode->delalloc_bytes -= len;
1867                 if (do_list && inode->delalloc_bytes == 0 &&
1868                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1869                                         &inode->runtime_flags))
1870                         btrfs_del_delalloc_inode(root, inode);
1871                 spin_unlock(&inode->lock);
1872         }
1873 
1874         if ((state->state & EXTENT_DELALLOC_NEW) &&
1875             (*bits & EXTENT_DELALLOC_NEW)) {
1876                 spin_lock(&inode->lock);
1877                 ASSERT(inode->new_delalloc_bytes >= len);
1878                 inode->new_delalloc_bytes -= len;
1879                 spin_unlock(&inode->lock);
1880         }
1881 }
1882 
1883 /*
1884  * Merge bio hook, this must check the chunk tree to make sure we don't create
1885  * bios that span stripes or chunks
1886  *
1887  * return 1 if page cannot be merged to bio
1888  * return 0 if page can be merged to bio
1889  * return error otherwise
1890  */
1891 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1892                          size_t size, struct bio *bio,
1893                          unsigned long bio_flags)
1894 {
1895         struct inode *inode = page->mapping->host;
1896         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1897         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1898         u64 length = 0;
1899         u64 map_length;
1900         int ret;
1901 
1902         if (bio_flags & EXTENT_BIO_COMPRESSED)
1903                 return 0;
1904 
1905         length = bio->bi_iter.bi_size;
1906         map_length = length;
1907         ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1908                               NULL, 0);
1909         if (ret < 0)
1910                 return ret;
1911         if (map_length < length + size)
1912                 return 1;
1913         return 0;
1914 }
1915 
1916 /*
1917  * in order to insert checksums into the metadata in large chunks,
1918  * we wait until bio submission time.   All the pages in the bio are
1919  * checksummed and sums are attached onto the ordered extent record.
1920  *
1921  * At IO completion time the cums attached on the ordered extent record
1922  * are inserted into the btree
1923  */
1924 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
1925                                     u64 bio_offset)
1926 {
1927         struct inode *inode = private_data;
1928         blk_status_t ret = 0;
1929 
1930         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1931         BUG_ON(ret); /* -ENOMEM */
1932         return 0;
1933 }
1934 
1935 /*
1936  * in order to insert checksums into the metadata in large chunks,
1937  * we wait until bio submission time.   All the pages in the bio are
1938  * checksummed and sums are attached onto the ordered extent record.
1939  *
1940  * At IO completion time the cums attached on the ordered extent record
1941  * are inserted into the btree
1942  */
1943 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
1944                           int mirror_num)
1945 {
1946         struct inode *inode = private_data;
1947         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1948         blk_status_t ret;
1949 
1950         ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
1951         if (ret) {
1952                 bio->bi_status = ret;
1953                 bio_endio(bio);
1954         }
1955         return ret;
1956 }
1957 
1958 /*
1959  * extent_io.c submission hook. This does the right thing for csum calculation
1960  * on write, or reading the csums from the tree before a read.
1961  *
1962  * Rules about async/sync submit,
1963  * a) read:                             sync submit
1964  *
1965  * b) write without checksum:           sync submit
1966  *
1967  * c) write with checksum:
1968  *    c-1) if bio is issued by fsync:   sync submit
1969  *         (sync_writers != 0)
1970  *
1971  *    c-2) if root is reloc root:       sync submit
1972  *         (only in case of buffered IO)
1973  *
1974  *    c-3) otherwise:                   async submit
1975  */
1976 static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
1977                                  int mirror_num, unsigned long bio_flags,
1978                                  u64 bio_offset)
1979 {
1980         struct inode *inode = private_data;
1981         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1982         struct btrfs_root *root = BTRFS_I(inode)->root;
1983         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1984         blk_status_t ret = 0;
1985         int skip_sum;
1986         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1987 
1988         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1989 
1990         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
1991                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1992 
1993         if (bio_op(bio) != REQ_OP_WRITE) {
1994                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
1995                 if (ret)
1996                         goto out;
1997 
1998                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1999                         ret = btrfs_submit_compressed_read(inode, bio,
2000                                                            mirror_num,
2001                                                            bio_flags);
2002                         goto out;
2003                 } else if (!skip_sum) {
2004                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2005                         if (ret)
2006                                 goto out;
2007                 }
2008                 goto mapit;
2009         } else if (async && !skip_sum) {
2010                 /* csum items have already been cloned */
2011                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2012                         goto mapit;
2013                 /* we're doing a write, do the async checksumming */
2014                 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2015                                           bio_offset, inode,
2016                                           btrfs_submit_bio_start);
2017                 goto out;
2018         } else if (!skip_sum) {
2019                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2020                 if (ret)
2021                         goto out;
2022         }
2023 
2024 mapit:
2025         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2026 
2027 out:
2028         if (ret) {
2029                 bio->bi_status = ret;
2030                 bio_endio(bio);
2031         }
2032         return ret;
2033 }
2034 
2035 /*
2036  * given a list of ordered sums record them in the inode.  This happens
2037  * at IO completion time based on sums calculated at bio submission time.
2038  */
2039 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2040                              struct inode *inode, struct list_head *list)
2041 {
2042         struct btrfs_ordered_sum *sum;
2043         int ret;
2044 
2045         list_for_each_entry(sum, list, list) {
2046                 trans->adding_csums = true;
2047                 ret = btrfs_csum_file_blocks(trans,
2048                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
2049                 trans->adding_csums = false;
2050                 if (ret)
2051                         return ret;
2052         }
2053         return 0;
2054 }
2055 
2056 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2057                               unsigned int extra_bits,
2058                               struct extent_state **cached_state, int dedupe)
2059 {
2060         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
2061         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2062                                    extra_bits, cached_state);
2063 }
2064 
2065 /* see btrfs_writepage_start_hook for details on why this is required */
2066 struct btrfs_writepage_fixup {
2067         struct page *page;
2068         struct btrfs_work work;
2069 };
2070 
2071 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2072 {
2073         struct btrfs_writepage_fixup *fixup;
2074         struct btrfs_ordered_extent *ordered;
2075         struct extent_state *cached_state = NULL;
2076         struct extent_changeset *data_reserved = NULL;
2077         struct page *page;
2078         struct inode *inode;
2079         u64 page_start;
2080         u64 page_end;
2081         int ret;
2082 
2083         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2084         page = fixup->page;
2085 again:
2086         lock_page(page);
2087         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2088                 ClearPageChecked(page);
2089                 goto out_page;
2090         }
2091 
2092         inode = page->mapping->host;
2093         page_start = page_offset(page);
2094         page_end = page_offset(page) + PAGE_SIZE - 1;
2095 
2096         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2097                          &cached_state);
2098 
2099         /* already ordered? We're done */
2100         if (PagePrivate2(page))
2101                 goto out;
2102 
2103         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2104                                         PAGE_SIZE);
2105         if (ordered) {
2106                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2107                                      page_end, &cached_state);
2108                 unlock_page(page);
2109                 btrfs_start_ordered_extent(inode, ordered, 1);
2110                 btrfs_put_ordered_extent(ordered);
2111                 goto again;
2112         }
2113 
2114         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2115                                            PAGE_SIZE);
2116         if (ret) {
2117                 mapping_set_error(page->mapping, ret);
2118                 end_extent_writepage(page, ret, page_start, page_end);
2119                 ClearPageChecked(page);
2120                 goto out;
2121          }
2122 
2123         ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2124                                         &cached_state, 0);
2125         if (ret) {
2126                 mapping_set_error(page->mapping, ret);
2127                 end_extent_writepage(page, ret, page_start, page_end);
2128                 ClearPageChecked(page);
2129                 goto out;
2130         }
2131 
2132         ClearPageChecked(page);
2133         set_page_dirty(page);
2134         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
2135 out:
2136         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2137                              &cached_state);
2138 out_page:
2139         unlock_page(page);
2140         put_page(page);
2141         kfree(fixup);
2142         extent_changeset_free(data_reserved);
2143 }
2144 
2145 /*
2146  * There are a few paths in the higher layers of the kernel that directly
2147  * set the page dirty bit without asking the filesystem if it is a
2148  * good idea.  This causes problems because we want to make sure COW
2149  * properly happens and the data=ordered rules are followed.
2150  *
2151  * In our case any range that doesn't have the ORDERED bit set
2152  * hasn't been properly setup for IO.  We kick off an async process
2153  * to fix it up.  The async helper will wait for ordered extents, set
2154  * the delalloc bit and make it safe to write the page.
2155  */
2156 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2157 {
2158         struct inode *inode = page->mapping->host;
2159         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2160         struct btrfs_writepage_fixup *fixup;
2161 
2162         /* this page is properly in the ordered list */
2163         if (TestClearPagePrivate2(page))
2164                 return 0;
2165 
2166         if (PageChecked(page))
2167                 return -EAGAIN;
2168 
2169         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2170         if (!fixup)
2171                 return -EAGAIN;
2172 
2173         SetPageChecked(page);
2174         get_page(page);
2175         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2176                         btrfs_writepage_fixup_worker, NULL, NULL);
2177         fixup->page = page;
2178         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2179         return -EBUSY;
2180 }
2181 
2182 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2183                                        struct inode *inode, u64 file_pos,
2184                                        u64 disk_bytenr, u64 disk_num_bytes,
2185                                        u64 num_bytes, u64 ram_bytes,
2186                                        u8 compression, u8 encryption,
2187                                        u16 other_encoding, int extent_type)
2188 {
2189         struct btrfs_root *root = BTRFS_I(inode)->root;
2190         struct btrfs_file_extent_item *fi;
2191         struct btrfs_path *path;
2192         struct extent_buffer *leaf;
2193         struct btrfs_key ins;
2194         u64 qg_released;
2195         int extent_inserted = 0;
2196         int ret;
2197 
2198         path = btrfs_alloc_path();
2199         if (!path)
2200                 return -ENOMEM;
2201 
2202         /*
2203          * we may be replacing one extent in the tree with another.
2204          * The new extent is pinned in the extent map, and we don't want
2205          * to drop it from the cache until it is completely in the btree.
2206          *
2207          * So, tell btrfs_drop_extents to leave this extent in the cache.
2208          * the caller is expected to unpin it and allow it to be merged
2209          * with the others.
2210          */
2211         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2212                                    file_pos + num_bytes, NULL, 0,
2213                                    1, sizeof(*fi), &extent_inserted);
2214         if (ret)
2215                 goto out;
2216 
2217         if (!extent_inserted) {
2218                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2219                 ins.offset = file_pos;
2220                 ins.type = BTRFS_EXTENT_DATA_KEY;
2221 
2222                 path->leave_spinning = 1;
2223                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2224                                               sizeof(*fi));
2225                 if (ret)
2226                         goto out;
2227         }
2228         leaf = path->nodes[0];
2229         fi = btrfs_item_ptr(leaf, path->slots[0],
2230                             struct btrfs_file_extent_item);
2231         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2232         btrfs_set_file_extent_type(leaf, fi, extent_type);
2233         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2234         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2235         btrfs_set_file_extent_offset(leaf, fi, 0);
2236         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2237         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2238         btrfs_set_file_extent_compression(leaf, fi, compression);
2239         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2240         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2241 
2242         btrfs_mark_buffer_dirty(leaf);
2243         btrfs_release_path(path);
2244 
2245         inode_add_bytes(inode, num_bytes);
2246 
2247         ins.objectid = disk_bytenr;
2248         ins.offset = disk_num_bytes;
2249         ins.type = BTRFS_EXTENT_ITEM_KEY;
2250 
2251         /*
2252          * Release the reserved range from inode dirty range map, as it is
2253          * already moved into delayed_ref_head
2254          */
2255         ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2256         if (ret < 0)
2257                 goto out;
2258         qg_released = ret;
2259         ret = btrfs_alloc_reserved_file_extent(trans, root,
2260                                                btrfs_ino(BTRFS_I(inode)),
2261                                                file_pos, qg_released, &ins);
2262 out:
2263         btrfs_free_path(path);
2264 
2265         return ret;
2266 }
2267 
2268 /* snapshot-aware defrag */
2269 struct sa_defrag_extent_backref {
2270         struct rb_node node;
2271         struct old_sa_defrag_extent *old;
2272         u64 root_id;
2273         u64 inum;
2274         u64 file_pos;
2275         u64 extent_offset;
2276         u64 num_bytes;
2277         u64 generation;
2278 };
2279 
2280 struct old_sa_defrag_extent {
2281         struct list_head list;
2282         struct new_sa_defrag_extent *new;
2283 
2284         u64 extent_offset;
2285         u64 bytenr;
2286         u64 offset;
2287         u64 len;
2288         int count;
2289 };
2290 
2291 struct new_sa_defrag_extent {
2292         struct rb_root root;
2293         struct list_head head;
2294         struct btrfs_path *path;
2295         struct inode *inode;
2296         u64 file_pos;
2297         u64 len;
2298         u64 bytenr;
2299         u64 disk_len;
2300         u8 compress_type;
2301 };
2302 
2303 static int backref_comp(struct sa_defrag_extent_backref *b1,
2304                         struct sa_defrag_extent_backref *b2)
2305 {
2306         if (b1->root_id < b2->root_id)
2307                 return -1;
2308         else if (b1->root_id > b2->root_id)
2309                 return 1;
2310 
2311         if (b1->inum < b2->inum)
2312                 return -1;
2313         else if (b1->inum > b2->inum)
2314                 return 1;
2315 
2316         if (b1->file_pos < b2->file_pos)
2317                 return -1;
2318         else if (b1->file_pos > b2->file_pos)
2319                 return 1;
2320 
2321         /*
2322          * [------------------------------] ===> (a range of space)
2323          *     |<--->|   |<---->| =============> (fs/file tree A)
2324          * |<---------------------------->| ===> (fs/file tree B)
2325          *
2326          * A range of space can refer to two file extents in one tree while
2327          * refer to only one file extent in another tree.
2328          *
2329          * So we may process a disk offset more than one time(two extents in A)
2330          * and locate at the same extent(one extent in B), then insert two same
2331          * backrefs(both refer to the extent in B).
2332          */
2333         return 0;
2334 }
2335 
2336 static void backref_insert(struct rb_root *root,
2337                            struct sa_defrag_extent_backref *backref)
2338 {
2339         struct rb_node **p = &root->rb_node;
2340         struct rb_node *parent = NULL;
2341         struct sa_defrag_extent_backref *entry;
2342         int ret;
2343 
2344         while (*p) {
2345                 parent = *p;
2346                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2347 
2348                 ret = backref_comp(backref, entry);
2349                 if (ret < 0)
2350                         p = &(*p)->rb_left;
2351                 else
2352                         p = &(*p)->rb_right;
2353         }
2354 
2355         rb_link_node(&backref->node, parent, p);
2356         rb_insert_color(&backref->node, root);
2357 }
2358 
2359 /*
2360  * Note the backref might has changed, and in this case we just return 0.
2361  */
2362 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2363                                        void *ctx)
2364 {
2365         struct btrfs_file_extent_item *extent;
2366         struct old_sa_defrag_extent *old = ctx;
2367         struct new_sa_defrag_extent *new = old->new;
2368         struct btrfs_path *path = new->path;
2369         struct btrfs_key key;
2370         struct btrfs_root *root;
2371         struct sa_defrag_extent_backref *backref;
2372         struct extent_buffer *leaf;
2373         struct inode *inode = new->inode;
2374         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2375         int slot;
2376         int ret;
2377         u64 extent_offset;
2378         u64 num_bytes;
2379 
2380         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2381             inum == btrfs_ino(BTRFS_I(inode)))
2382                 return 0;
2383 
2384         key.objectid = root_id;
2385         key.type = BTRFS_ROOT_ITEM_KEY;
2386         key.offset = (u64)-1;
2387 
2388         root = btrfs_read_fs_root_no_name(fs_info, &key);
2389         if (IS_ERR(root)) {
2390                 if (PTR_ERR(root) == -ENOENT)
2391                         return 0;
2392                 WARN_ON(1);
2393                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2394                          inum, offset, root_id);
2395                 return PTR_ERR(root);
2396         }
2397 
2398         key.objectid = inum;
2399         key.type = BTRFS_EXTENT_DATA_KEY;
2400         if (offset > (u64)-1 << 32)
2401                 key.offset = 0;
2402         else
2403                 key.offset = offset;
2404 
2405         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2406         if (WARN_ON(ret < 0))
2407                 return ret;
2408         ret = 0;
2409 
2410         while (1) {
2411                 cond_resched();
2412 
2413                 leaf = path->nodes[0];
2414                 slot = path->slots[0];
2415 
2416                 if (slot >= btrfs_header_nritems(leaf)) {
2417                         ret = btrfs_next_leaf(root, path);
2418                         if (ret < 0) {
2419                                 goto out;
2420                         } else if (ret > 0) {
2421                                 ret = 0;
2422                                 goto out;
2423                         }
2424                         continue;
2425                 }
2426 
2427                 path->slots[0]++;
2428 
2429                 btrfs_item_key_to_cpu(leaf, &key, slot);
2430 
2431                 if (key.objectid > inum)
2432                         goto out;
2433 
2434                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2435                         continue;
2436 
2437                 extent = btrfs_item_ptr(leaf, slot,
2438                                         struct btrfs_file_extent_item);
2439 
2440                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2441                         continue;
2442 
2443                 /*
2444                  * 'offset' refers to the exact key.offset,
2445                  * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2446                  * (key.offset - extent_offset).
2447                  */
2448                 if (key.offset != offset)
2449                         continue;
2450 
2451                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2452                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2453 
2454                 if (extent_offset >= old->extent_offset + old->offset +
2455                     old->len || extent_offset + num_bytes <=
2456                     old->extent_offset + old->offset)
2457                         continue;
2458                 break;
2459         }
2460 
2461         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2462         if (!backref) {
2463                 ret = -ENOENT;
2464                 goto out;
2465         }
2466 
2467         backref->root_id = root_id;
2468         backref->inum = inum;
2469         backref->file_pos = offset;
2470         backref->num_bytes = num_bytes;
2471         backref->extent_offset = extent_offset;
2472         backref->generation = btrfs_file_extent_generation(leaf, extent);
2473         backref->old = old;
2474         backref_insert(&new->root, backref);
2475         old->count++;
2476 out:
2477         btrfs_release_path(path);
2478         WARN_ON(ret);
2479         return ret;
2480 }
2481 
2482 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2483                                    struct new_sa_defrag_extent *new)
2484 {
2485         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2486         struct old_sa_defrag_extent *old, *tmp;
2487         int ret;
2488 
2489         new->path = path;
2490 
2491         list_for_each_entry_safe(old, tmp, &new->head, list) {
2492                 ret = iterate_inodes_from_logical(old->bytenr +
2493                                                   old->extent_offset, fs_info,
2494                                                   path, record_one_backref,
2495                                                   old, false);
2496                 if (ret < 0 && ret != -ENOENT)
2497                         return false;
2498 
2499                 /* no backref to be processed for this extent */
2500                 if (!old->count) {
2501                         list_del(&old->list);
2502                         kfree(old);
2503                 }
2504         }
2505 
2506         if (list_empty(&new->head))
2507                 return false;
2508 
2509         return true;
2510 }
2511 
2512 static int relink_is_mergable(struct extent_buffer *leaf,
2513                               struct btrfs_file_extent_item *fi,
2514                               struct new_sa_defrag_extent *new)
2515 {
2516         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2517                 return 0;
2518 
2519         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2520                 return 0;
2521 
2522         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2523                 return 0;
2524 
2525         if (btrfs_file_extent_encryption(leaf, fi) ||
2526             btrfs_file_extent_other_encoding(leaf, fi))
2527                 return 0;
2528 
2529         return 1;
2530 }
2531 
2532 /*
2533  * Note the backref might has changed, and in this case we just return 0.
2534  */
2535 static noinline int relink_extent_backref(struct btrfs_path *path,
2536                                  struct sa_defrag_extent_backref *prev,
2537                                  struct sa_defrag_extent_backref *backref)
2538 {
2539         struct btrfs_file_extent_item *extent;
2540         struct btrfs_file_extent_item *item;
2541         struct btrfs_ordered_extent *ordered;
2542         struct btrfs_trans_handle *trans;
2543         struct btrfs_root *root;
2544         struct btrfs_key key;
2545         struct extent_buffer *leaf;
2546         struct old_sa_defrag_extent *old = backref->old;
2547         struct new_sa_defrag_extent *new = old->new;
2548         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2549         struct inode *inode;
2550         struct extent_state *cached = NULL;
2551         int ret = 0;
2552         u64 start;
2553         u64 len;
2554         u64 lock_start;
2555         u64 lock_end;
2556         bool merge = false;
2557         int index;
2558 
2559         if (prev && prev->root_id == backref->root_id &&
2560             prev->inum == backref->inum &&
2561             prev->file_pos + prev->num_bytes == backref->file_pos)
2562                 merge = true;
2563 
2564         /* step 1: get root */
2565         key.objectid = backref->root_id;
2566         key.type = BTRFS_ROOT_ITEM_KEY;
2567         key.offset = (u64)-1;
2568 
2569         index = srcu_read_lock(&fs_info->subvol_srcu);
2570 
2571         root = btrfs_read_fs_root_no_name(fs_info, &key);
2572         if (IS_ERR(root)) {
2573                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2574                 if (PTR_ERR(root) == -ENOENT)
2575                         return 0;
2576                 return PTR_ERR(root);
2577         }
2578 
2579         if (btrfs_root_readonly(root)) {
2580                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2581                 return 0;
2582         }
2583 
2584         /* step 2: get inode */
2585         key.objectid = backref->inum;
2586         key.type = BTRFS_INODE_ITEM_KEY;
2587         key.offset = 0;
2588 
2589         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2590         if (IS_ERR(inode)) {
2591                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2592                 return 0;
2593         }
2594 
2595         srcu_read_unlock(&fs_info->subvol_srcu, index);
2596 
2597         /* step 3: relink backref */
2598         lock_start = backref->file_pos;
2599         lock_end = backref->file_pos + backref->num_bytes - 1;
2600         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2601                          &cached);
2602 
2603         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2604         if (ordered) {
2605                 btrfs_put_ordered_extent(ordered);
2606                 goto out_unlock;
2607         }
2608 
2609         trans = btrfs_join_transaction(root);
2610         if (IS_ERR(trans)) {
2611                 ret = PTR_ERR(trans);
2612                 goto out_unlock;
2613         }
2614 
2615         key.objectid = backref->inum;
2616         key.type = BTRFS_EXTENT_DATA_KEY;
2617         key.offset = backref->file_pos;
2618 
2619         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2620         if (ret < 0) {
2621                 goto out_free_path;
2622         } else if (ret > 0) {
2623                 ret = 0;
2624                 goto out_free_path;
2625         }
2626 
2627         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2628                                 struct btrfs_file_extent_item);
2629 
2630         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2631             backref->generation)
2632                 goto out_free_path;
2633 
2634         btrfs_release_path(path);
2635 
2636         start = backref->file_pos;
2637         if (backref->extent_offset < old->extent_offset + old->offset)
2638                 start += old->extent_offset + old->offset -
2639                          backref->extent_offset;
2640 
2641         len = min(backref->extent_offset + backref->num_bytes,
2642                   old->extent_offset + old->offset + old->len);
2643         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2644 
2645         ret = btrfs_drop_extents(trans, root, inode, start,
2646                                  start + len, 1);
2647         if (ret)
2648                 goto out_free_path;
2649 again:
2650         key.objectid = btrfs_ino(BTRFS_I(inode));
2651         key.type = BTRFS_EXTENT_DATA_KEY;
2652         key.offset = start;
2653 
2654         path->leave_spinning = 1;
2655         if (merge) {
2656                 struct btrfs_file_extent_item *fi;
2657                 u64 extent_len;
2658                 struct btrfs_key found_key;
2659 
2660                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2661                 if (ret < 0)
2662                         goto out_free_path;
2663 
2664                 path->slots[0]--;
2665                 leaf = path->nodes[0];
2666                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2667 
2668                 fi = btrfs_item_ptr(leaf, path->slots[0],
2669                                     struct btrfs_file_extent_item);
2670                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2671 
2672                 if (extent_len + found_key.offset == start &&
2673                     relink_is_mergable(leaf, fi, new)) {
2674                         btrfs_set_file_extent_num_bytes(leaf, fi,
2675                                                         extent_len + len);
2676                         btrfs_mark_buffer_dirty(leaf);
2677                         inode_add_bytes(inode, len);
2678 
2679                         ret = 1;
2680                         goto out_free_path;
2681                 } else {
2682                         merge = false;
2683                         btrfs_release_path(path);
2684                         goto again;
2685                 }
2686         }
2687 
2688         ret = btrfs_insert_empty_item(trans, root, path, &key,
2689                                         sizeof(*extent));
2690         if (ret) {
2691                 btrfs_abort_transaction(trans, ret);
2692                 goto out_free_path;
2693         }
2694 
2695         leaf = path->nodes[0];
2696         item = btrfs_item_ptr(leaf, path->slots[0],
2697                                 struct btrfs_file_extent_item);
2698         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2699         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2700         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2701         btrfs_set_file_extent_num_bytes(leaf, item, len);
2702         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2703         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2704         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2705         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2706         btrfs_set_file_extent_encryption(leaf, item, 0);
2707         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2708 
2709         btrfs_mark_buffer_dirty(leaf);
2710         inode_add_bytes(inode, len);
2711         btrfs_release_path(path);
2712 
2713         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2714                         new->disk_len, 0,
2715                         backref->root_id, backref->inum,
2716                         new->file_pos); /* start - extent_offset */
2717         if (ret) {
2718                 btrfs_abort_transaction(trans, ret);
2719                 goto out_free_path;
2720         }
2721 
2722         ret = 1;
2723 out_free_path:
2724         btrfs_release_path(path);
2725         path->leave_spinning = 0;
2726         btrfs_end_transaction(trans);
2727 out_unlock:
2728         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2729                              &cached);
2730         iput(inode);
2731         return ret;
2732 }
2733 
2734 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2735 {
2736         struct old_sa_defrag_extent *old, *tmp;
2737 
2738         if (!new)
2739                 return;
2740 
2741         list_for_each_entry_safe(old, tmp, &new->head, list) {
2742                 kfree(old);
2743         }
2744         kfree(new);
2745 }
2746 
2747 static void relink_file_extents(struct new_sa_defrag_extent *new)
2748 {
2749         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2750         struct btrfs_path *path;
2751         struct sa_defrag_extent_backref *backref;
2752         struct sa_defrag_extent_backref *prev = NULL;
2753         struct inode *inode;
2754         struct rb_node *node;
2755         int ret;
2756 
2757         inode = new->inode;
2758 
2759         path = btrfs_alloc_path();
2760         if (!path)
2761                 return;
2762 
2763         if (!record_extent_backrefs(path, new)) {
2764                 btrfs_free_path(path);
2765                 goto out;
2766         }
2767         btrfs_release_path(path);
2768 
2769         while (1) {
2770                 node = rb_first(&new->root);
2771                 if (!node)
2772                         break;
2773                 rb_erase(node, &new->root);
2774 
2775                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2776 
2777                 ret = relink_extent_backref(path, prev, backref);
2778                 WARN_ON(ret < 0);
2779 
2780                 kfree(prev);
2781 
2782                 if (ret == 1)
2783                         prev = backref;
2784                 else
2785                         prev = NULL;
2786                 cond_resched();
2787         }
2788         kfree(prev);
2789 
2790         btrfs_free_path(path);
2791 out:
2792         free_sa_defrag_extent(new);
2793 
2794         atomic_dec(&fs_info->defrag_running);
2795         wake_up(&fs_info->transaction_wait);
2796 }
2797 
2798 static struct new_sa_defrag_extent *
2799 record_old_file_extents(struct inode *inode,
2800                         struct btrfs_ordered_extent *ordered)
2801 {
2802         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2803         struct btrfs_root *root = BTRFS_I(inode)->root;
2804         struct btrfs_path *path;
2805         struct btrfs_key key;
2806         struct old_sa_defrag_extent *old;
2807         struct new_sa_defrag_extent *new;
2808         int ret;
2809 
2810         new = kmalloc(sizeof(*new), GFP_NOFS);
2811         if (!new)
2812                 return NULL;
2813 
2814         new->inode = inode;
2815         new->file_pos = ordered->file_offset;
2816         new->len = ordered->len;
2817         new->bytenr = ordered->start;
2818         new->disk_len = ordered->disk_len;
2819         new->compress_type = ordered->compress_type;
2820         new->root = RB_ROOT;
2821         INIT_LIST_HEAD(&new->head);
2822 
2823         path = btrfs_alloc_path();
2824         if (!path)
2825                 goto out_kfree;
2826 
2827         key.objectid = btrfs_ino(BTRFS_I(inode));
2828         key.type = BTRFS_EXTENT_DATA_KEY;
2829         key.offset = new->file_pos;
2830 
2831         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2832         if (ret < 0)
2833                 goto out_free_path;
2834         if (ret > 0 && path->slots[0] > 0)
2835                 path->slots[0]--;
2836 
2837         /* find out all the old extents for the file range */
2838         while (1) {
2839                 struct btrfs_file_extent_item *extent;
2840                 struct extent_buffer *l;
2841                 int slot;
2842                 u64 num_bytes;
2843                 u64 offset;
2844                 u64 end;
2845                 u64 disk_bytenr;
2846                 u64 extent_offset;
2847 
2848                 l = path->nodes[0];
2849                 slot = path->slots[0];
2850 
2851                 if (slot >= btrfs_header_nritems(l)) {
2852                         ret = btrfs_next_leaf(root, path);
2853                         if (ret < 0)
2854                                 goto out_free_path;
2855                         else if (ret > 0)
2856                                 break;
2857                         continue;
2858                 }
2859 
2860                 btrfs_item_key_to_cpu(l, &key, slot);
2861 
2862                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2863                         break;
2864                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2865                         break;
2866                 if (key.offset >= new->file_pos + new->len)
2867                         break;
2868 
2869                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2870 
2871                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2872                 if (key.offset + num_bytes < new->file_pos)
2873                         goto next;
2874 
2875                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2876                 if (!disk_bytenr)
2877                         goto next;
2878 
2879                 extent_offset = btrfs_file_extent_offset(l, extent);
2880 
2881                 old = kmalloc(sizeof(*old), GFP_NOFS);
2882                 if (!old)
2883                         goto out_free_path;
2884 
2885                 offset = max(new->file_pos, key.offset);
2886                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2887 
2888                 old->bytenr = disk_bytenr;
2889                 old->extent_offset = extent_offset;
2890                 old->offset = offset - key.offset;
2891                 old->len = end - offset;
2892                 old->new = new;
2893                 old->count = 0;
2894                 list_add_tail(&old->list, &new->head);
2895 next:
2896                 path->slots[0]++;
2897                 cond_resched();
2898         }
2899 
2900         btrfs_free_path(path);
2901         atomic_inc(&fs_info->defrag_running);
2902 
2903         return new;
2904 
2905 out_free_path:
2906         btrfs_free_path(path);
2907 out_kfree:
2908         free_sa_defrag_extent(new);
2909         return NULL;
2910 }
2911 
2912 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2913                                          u64 start, u64 len)
2914 {
2915         struct btrfs_block_group_cache *cache;
2916 
2917         cache = btrfs_lookup_block_group(fs_info, start);
2918         ASSERT(cache);
2919 
2920         spin_lock(&cache->lock);
2921         cache->delalloc_bytes -= len;
2922         spin_unlock(&cache->lock);
2923 
2924         btrfs_put_block_group(cache);
2925 }
2926 
2927 /* as ordered data IO finishes, this gets called so we can finish
2928  * an ordered extent if the range of bytes in the file it covers are
2929  * fully written.
2930  */
2931 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2932 {
2933         struct inode *inode = ordered_extent->inode;
2934         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2935         struct btrfs_root *root = BTRFS_I(inode)->root;
2936         struct btrfs_trans_handle *trans = NULL;
2937         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2938         struct extent_state *cached_state = NULL;
2939         struct new_sa_defrag_extent *new = NULL;
2940         int compress_type = 0;
2941         int ret = 0;
2942         u64 logical_len = ordered_extent->len;
2943         bool nolock;
2944         bool truncated = false;
2945         bool range_locked = false;
2946         bool clear_new_delalloc_bytes = false;
2947 
2948         if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2949             !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2950             !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2951                 clear_new_delalloc_bytes = true;
2952 
2953         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
2954 
2955         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2956                 ret = -EIO;
2957                 goto out;
2958         }
2959 
2960         btrfs_free_io_failure_record(BTRFS_I(inode),
2961                         ordered_extent->file_offset,
2962                         ordered_extent->file_offset +
2963                         ordered_extent->len - 1);
2964 
2965         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2966                 truncated = true;
2967                 logical_len = ordered_extent->truncated_len;
2968                 /* Truncated the entire extent, don't bother adding */
2969                 if (!logical_len)
2970                         goto out;
2971         }
2972 
2973         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2974                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2975 
2976                 /*
2977                  * For mwrite(mmap + memset to write) case, we still reserve
2978                  * space for NOCOW range.
2979                  * As NOCOW won't cause a new delayed ref, just free the space
2980                  */
2981                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
2982                                        ordered_extent->len);
2983                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2984                 if (nolock)
2985                         trans = btrfs_join_transaction_nolock(root);
2986                 else
2987                         trans = btrfs_join_transaction(root);
2988                 if (IS_ERR(trans)) {
2989                         ret = PTR_ERR(trans);
2990                         trans = NULL;
2991                         goto out;
2992                 }
2993                 trans->block_rsv = &BTRFS_I(inode)->block_rsv;
2994                 ret = btrfs_update_inode_fallback(trans, root, inode);
2995                 if (ret) /* -ENOMEM or corruption */
2996                         btrfs_abort_transaction(trans, ret);
2997                 goto out;
2998         }
2999 
3000         range_locked = true;
3001         lock_extent_bits(io_tree, ordered_extent->file_offset,
3002                          ordered_extent->file_offset + ordered_extent->len - 1,
3003                          &cached_state);
3004 
3005         ret = test_range_bit(io_tree, ordered_extent->file_offset,
3006                         ordered_extent->file_offset + ordered_extent->len - 1,
3007                         EXTENT_DEFRAG, 0, cached_state);
3008         if (ret) {
3009                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
3010                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3011                         /* the inode is shared */
3012                         new = record_old_file_extents(inode, ordered_extent);
3013 
3014                 clear_extent_bit(io_tree, ordered_extent->file_offset,
3015                         ordered_extent->file_offset + ordered_extent->len - 1,
3016                         EXTENT_DEFRAG, 0, 0, &cached_state);
3017         }
3018 
3019         if (nolock)
3020                 trans = btrfs_join_transaction_nolock(root);
3021         else
3022                 trans = btrfs_join_transaction(root);
3023         if (IS_ERR(trans)) {
3024                 ret = PTR_ERR(trans);
3025                 trans = NULL;
3026                 goto out;
3027         }
3028 
3029         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3030 
3031         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3032                 compress_type = ordered_extent->compress_type;
3033         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3034                 BUG_ON(compress_type);
3035                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3036                                        ordered_extent->len);
3037                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3038                                                 ordered_extent->file_offset,
3039                                                 ordered_extent->file_offset +
3040                                                 logical_len);
3041         } else {
3042                 BUG_ON(root == fs_info->tree_root);
3043                 ret = insert_reserved_file_extent(trans, inode,
3044                                                 ordered_extent->file_offset,
3045                                                 ordered_extent->start,
3046                                                 ordered_extent->disk_len,
3047                                                 logical_len, logical_len,
3048                                                 compress_type, 0, 0,
3049                                                 BTRFS_FILE_EXTENT_REG);
3050                 if (!ret)
3051                         btrfs_release_delalloc_bytes(fs_info,
3052                                                      ordered_extent->start,
3053                                                      ordered_extent->disk_len);
3054         }
3055         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3056                            ordered_extent->file_offset, ordered_extent->len,
3057                            trans->transid);
3058         if (ret < 0) {
3059                 btrfs_abort_transaction(trans, ret);
3060                 goto out;
3061         }
3062 
3063         ret = add_pending_csums(trans, inode, &ordered_extent->list);
3064         if (ret) {
3065                 btrfs_abort_transaction(trans, ret);
3066                 goto out;
3067         }
3068 
3069         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3070         ret = btrfs_update_inode_fallback(trans, root, inode);
3071         if (ret) { /* -ENOMEM or corruption */
3072                 btrfs_abort_transaction(trans, ret);
3073                 goto out;
3074         }
3075         ret = 0;
3076 out:
3077         if (range_locked || clear_new_delalloc_bytes) {
3078                 unsigned int clear_bits = 0;
3079 
3080                 if (range_locked)
3081                         clear_bits |= EXTENT_LOCKED;
3082                 if (clear_new_delalloc_bytes)
3083                         clear_bits |= EXTENT_DELALLOC_NEW;
3084                 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3085                                  ordered_extent->file_offset,
3086                                  ordered_extent->file_offset +
3087                                  ordered_extent->len - 1,
3088                                  clear_bits,
3089                                  (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3090                                  0, &cached_state);
3091         }
3092 
3093         if (trans)
3094                 btrfs_end_transaction(trans);
3095 
3096         if (ret || truncated) {
3097                 u64 start, end;
3098 
3099                 if (truncated)
3100                         start = ordered_extent->file_offset + logical_len;
3101                 else
3102                         start = ordered_extent->file_offset;
3103                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3104                 clear_extent_uptodate(io_tree, start, end, NULL);
3105 
3106                 /* Drop the cache for the part of the extent we didn't write. */
3107                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3108 
3109                 /*
3110                  * If the ordered extent had an IOERR or something else went
3111                  * wrong we need to return the space for this ordered extent
3112                  * back to the allocator.  We only free the extent in the
3113                  * truncated case if we didn't write out the extent at all.
3114                  */
3115                 if ((ret || !logical_len) &&
3116                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3117                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3118                         btrfs_free_reserved_extent(fs_info,
3119                                                    ordered_extent->start,
3120                                                    ordered_extent->disk_len, 1);
3121         }
3122 
3123 
3124         /*
3125          * This needs to be done to make sure anybody waiting knows we are done
3126          * updating everything for this ordered extent.
3127          */
3128         btrfs_remove_ordered_extent(inode, ordered_extent);
3129 
3130         /* for snapshot-aware defrag */
3131         if (new) {
3132                 if (ret) {
3133                         free_sa_defrag_extent(new);
3134                         atomic_dec(&fs_info->defrag_running);
3135                 } else {
3136                         relink_file_extents(new);
3137                 }
3138         }
3139 
3140         /* once for us */
3141         btrfs_put_ordered_extent(ordered_extent);
3142         /* once for the tree */
3143         btrfs_put_ordered_extent(ordered_extent);
3144 
3145         /* Try to release some metadata so we don't get an OOM but don't wait */
3146         btrfs_btree_balance_dirty_nodelay(fs_info);
3147 
3148         return ret;
3149 }
3150 
3151 static void finish_ordered_fn(struct btrfs_work *work)
3152 {
3153         struct btrfs_ordered_extent *ordered_extent;
3154         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3155         btrfs_finish_ordered_io(ordered_extent);
3156 }
3157 
3158 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3159                                 struct extent_state *state, int uptodate)
3160 {
3161         struct inode *inode = page->mapping->host;
3162         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3163         struct btrfs_ordered_extent *ordered_extent = NULL;
3164         struct btrfs_workqueue *wq;
3165         btrfs_work_func_t func;
3166 
3167         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3168 
3169         ClearPagePrivate2(page);
3170         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3171                                             end - start + 1, uptodate))
3172                 return;
3173 
3174         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
3175                 wq = fs_info->endio_freespace_worker;
3176                 func = btrfs_freespace_write_helper;
3177         } else {
3178                 wq = fs_info->endio_write_workers;
3179                 func = btrfs_endio_write_helper;
3180         }
3181 
3182         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3183                         NULL);
3184         btrfs_queue_work(wq, &ordered_extent->work);
3185 }
3186 
3187 static int __readpage_endio_check(struct inode *inode,
3188                                   struct btrfs_io_bio *io_bio,
3189                                   int icsum, struct page *page,
3190                                   int pgoff, u64 start, size_t len)
3191 {
3192         char *kaddr;
3193         u32 csum_expected;
3194         u32 csum = ~(u32)0;
3195 
3196         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3197 
3198         kaddr = kmap_atomic(page);
3199         csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
3200         btrfs_csum_final(csum, (u8 *)&csum);
3201         if (csum != csum_expected)
3202                 goto zeroit;
3203 
3204         kunmap_atomic(kaddr);
3205         return 0;
3206 zeroit:
3207         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3208                                     io_bio->mirror_num);
3209         memset(kaddr + pgoff, 1, len);
3210         flush_dcache_page(page);
3211         kunmap_atomic(kaddr);
3212         return -EIO;
3213 }
3214 
3215 /*
3216  * when reads are done, we need to check csums to verify the data is correct
3217  * if there's a match, we allow the bio to finish.  If not, the code in
3218  * extent_io.c will try to find good copies for us.
3219  */
3220 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3221                                       u64 phy_offset, struct page *page,
3222                                       u64 start, u64 end, int mirror)
3223 {
3224         size_t offset = start - page_offset(page);
3225         struct inode *inode = page->mapping->host;
3226         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3227         struct btrfs_root *root = BTRFS_I(inode)->root;
3228 
3229         if (PageChecked(page)) {
3230                 ClearPageChecked(page);
3231                 return 0;
3232         }
3233 
3234         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3235                 return 0;
3236 
3237         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3238             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3239                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3240                 return 0;
3241         }
3242 
3243         phy_offset >>= inode->i_sb->s_blocksize_bits;
3244         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3245                                       start, (size_t)(end - start + 1));
3246 }
3247 
3248 /*
3249  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3250  *
3251  * @inode: The inode we want to perform iput on
3252  *
3253  * This function uses the generic vfs_inode::i_count to track whether we should
3254  * just decrement it (in case it's > 1) or if this is the last iput then link
3255  * the inode to the delayed iput machinery. Delayed iputs are processed at
3256  * transaction commit time/superblock commit/cleaner kthread.
3257  */
3258 void btrfs_add_delayed_iput(struct inode *inode)
3259 {
3260         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3261         struct btrfs_inode *binode = BTRFS_I(inode);
3262 
3263         if (atomic_add_unless(&inode->i_count, -1, 1))
3264                 return;
3265 
3266         spin_lock(&fs_info->delayed_iput_lock);
3267         ASSERT(list_empty(&binode->delayed_iput));
3268         list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3269         spin_unlock(&fs_info->delayed_iput_lock);
3270 }
3271 
3272 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3273 {
3274 
3275         spin_lock(&fs_info->delayed_iput_lock);
3276         while (!list_empty(&fs_info->delayed_iputs)) {
3277                 struct btrfs_inode *inode;
3278 
3279                 inode = list_first_entry(&fs_info->delayed_iputs,
3280                                 struct btrfs_inode, delayed_iput);
3281                 list_del_init(&inode->delayed_iput);
3282                 spin_unlock(&fs_info->delayed_iput_lock);
3283                 iput(&inode->vfs_inode);
3284                 spin_lock(&fs_info->delayed_iput_lock);
3285         }
3286         spin_unlock(&fs_info->delayed_iput_lock);
3287 }
3288 
3289 /*
3290  * This creates an orphan entry for the given inode in case something goes wrong
3291  * in the middle of an unlink.
3292  */
3293 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3294                      struct btrfs_inode *inode)
3295 {
3296         int ret;
3297 
3298         ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3299         if (ret && ret != -EEXIST) {
3300                 btrfs_abort_transaction(trans, ret);
3301                 return ret;
3302         }
3303 
3304         return 0;
3305 }
3306 
3307 /*
3308  * We have done the delete so we can go ahead and remove the orphan item for
3309  * this particular inode.
3310  */
3311 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3312                             struct btrfs_inode *inode)
3313 {
3314         return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3315 }
3316 
3317 /*
3318  * this cleans up any orphans that may be left on the list from the last use
3319  * of this root.
3320  */
3321 int btrfs_orphan_cleanup(struct btrfs_root *root)
3322 {
3323         struct btrfs_fs_info *fs_info = root->fs_info;
3324         struct btrfs_path *path;
3325         struct extent_buffer *leaf;
3326         struct btrfs_key key, found_key;
3327         struct btrfs_trans_handle *trans;
3328         struct inode *inode;
3329         u64 last_objectid = 0;
3330         int ret = 0, nr_unlink = 0;
3331 
3332         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3333                 return 0;
3334 
3335         path = btrfs_alloc_path();
3336         if (!path) {
3337                 ret = -ENOMEM;
3338                 goto out;
3339         }
3340         path->reada = READA_BACK;
3341 
3342         key.objectid = BTRFS_ORPHAN_OBJECTID;
3343         key.type = BTRFS_ORPHAN_ITEM_KEY;
3344         key.offset = (u64)-1;
3345 
3346         while (1) {
3347                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3348                 if (ret < 0)
3349                         goto out;
3350 
3351                 /*
3352                  * if ret == 0 means we found what we were searching for, which
3353                  * is weird, but possible, so only screw with path if we didn't
3354                  * find the key and see if we have stuff that matches
3355                  */
3356                 if (ret > 0) {
3357                         ret = 0;
3358                         if (path->slots[0] == 0)
3359                                 break;
3360                         path->slots[0]--;
3361                 }
3362 
3363                 /* pull out the item */
3364                 leaf = path->nodes[0];
3365                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3366 
3367                 /* make sure the item matches what we want */
3368                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3369                         break;
3370                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3371                         break;
3372 
3373                 /* release the path since we're done with it */
3374                 btrfs_release_path(path);
3375 
3376                 /*
3377                  * this is where we are basically btrfs_lookup, without the
3378                  * crossing root thing.  we store the inode number in the
3379                  * offset of the orphan item.
3380                  */
3381 
3382                 if (found_key.offset == last_objectid) {
3383                         btrfs_err(fs_info,
3384                                   "Error removing orphan entry, stopping orphan cleanup");
3385                         ret = -EINVAL;
3386                         goto out;
3387                 }
3388 
3389                 last_objectid = found_key.offset;
3390 
3391                 found_key.objectid = found_key.offset;
3392                 found_key.type = BTRFS_INODE_ITEM_KEY;
3393                 found_key.offset = 0;
3394                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3395                 ret = PTR_ERR_OR_ZERO(inode);
3396                 if (ret && ret != -ENOENT)
3397                         goto out;
3398 
3399                 if (ret == -ENOENT && root == fs_info->tree_root) {
3400                         struct btrfs_root *dead_root;
3401                         struct btrfs_fs_info *fs_info = root->fs_info;
3402                         int is_dead_root = 0;
3403 
3404                         /*
3405                          * this is an orphan in the tree root. Currently these
3406                          * could come from 2 sources:
3407                          *  a) a snapshot deletion in progress
3408                          *  b) a free space cache inode
3409                          * We need to distinguish those two, as the snapshot
3410                          * orphan must not get deleted.
3411                          * find_dead_roots already ran before us, so if this
3412                          * is a snapshot deletion, we should find the root
3413                          * in the dead_roots list
3414                          */
3415                         spin_lock(&fs_info->trans_lock);
3416                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3417                                             root_list) {
3418                                 if (dead_root->root_key.objectid ==
3419                                     found_key.objectid) {
3420                                         is_dead_root = 1;
3421                                         break;
3422                                 }
3423                         }
3424                         spin_unlock(&fs_info->trans_lock);
3425                         if (is_dead_root) {
3426                                 /* prevent this orphan from being found again */
3427                                 key.offset = found_key.objectid - 1;
3428                                 continue;
3429                         }
3430 
3431                 }
3432 
3433                 /*
3434                  * If we have an inode with links, there are a couple of
3435                  * possibilities. Old kernels (before v3.12) used to create an
3436                  * orphan item for truncate indicating that there were possibly
3437                  * extent items past i_size that needed to be deleted. In v3.12,
3438                  * truncate was changed to update i_size in sync with the extent
3439                  * items, but the (useless) orphan item was still created. Since
3440                  * v4.18, we don't create the orphan item for truncate at all.
3441                  *
3442                  * So, this item could mean that we need to do a truncate, but
3443                  * only if this filesystem was last used on a pre-v3.12 kernel
3444                  * and was not cleanly unmounted. The odds of that are quite
3445                  * slim, and it's a pain to do the truncate now, so just delete
3446                  * the orphan item.
3447                  *
3448                  * It's also possible that this orphan item was supposed to be
3449                  * deleted but wasn't. The inode number may have been reused,
3450                  * but either way, we can delete the orphan item.
3451                  */
3452                 if (ret == -ENOENT || inode->i_nlink) {
3453                         if (!ret)
3454                                 iput(inode);
3455                         trans = btrfs_start_transaction(root, 1);
3456                         if (IS_ERR(trans)) {
3457                                 ret = PTR_ERR(trans);
3458                                 goto out;
3459                         }
3460                         btrfs_debug(fs_info, "auto deleting %Lu",
3461                                     found_key.objectid);
3462                         ret = btrfs_del_orphan_item(trans, root,
3463                                                     found_key.objectid);
3464                         btrfs_end_transaction(trans);
3465                         if (ret)
3466                                 goto out;
3467                         continue;
3468                 }
3469 
3470                 nr_unlink++;
3471 
3472                 /* this will do delete_inode and everything for us */
3473                 iput(inode);
3474                 if (ret)
3475                         goto out;
3476         }
3477         /* release the path since we're done with it */
3478         btrfs_release_path(path);
3479 
3480         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3481 
3482         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3483                 trans = btrfs_join_transaction(root);
3484                 if (!IS_ERR(trans))
3485                         btrfs_end_transaction(trans);
3486         }
3487 
3488         if (nr_unlink)
3489                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3490 
3491 out:
3492         if (ret)
3493                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3494         btrfs_free_path(path);
3495         return ret;
3496 }
3497 
3498 /*
3499  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3500  * don't find any xattrs, we know there can't be any acls.
3501  *
3502  * slot is the slot the inode is in, objectid is the objectid of the inode
3503  */
3504 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3505                                           int slot, u64 objectid,
3506                                           int *first_xattr_slot)
3507 {
3508         u32 nritems = btrfs_header_nritems(leaf);
3509         struct btrfs_key found_key;
3510         static u64 xattr_access = 0;
3511         static u64 xattr_default = 0;
3512         int scanned = 0;
3513 
3514         if (!xattr_access) {
3515                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3516                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3517                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3518                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3519         }
3520 
3521         slot++;
3522         *first_xattr_slot = -1;
3523         while (slot < nritems) {
3524                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3525 
3526                 /* we found a different objectid, there must not be acls */
3527                 if (found_key.objectid != objectid)
3528                         return 0;
3529 
3530                 /* we found an xattr, assume we've got an acl */
3531                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3532                         if (*first_xattr_slot == -1)
3533                                 *first_xattr_slot = slot;
3534                         if (found_key.offset == xattr_access ||
3535                             found_key.offset == xattr_default)
3536                                 return 1;
3537                 }
3538 
3539                 /*
3540                  * we found a key greater than an xattr key, there can't
3541                  * be any acls later on
3542                  */
3543                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3544                         return 0;
3545 
3546                 slot++;
3547                 scanned++;
3548 
3549                 /*
3550                  * it goes inode, inode backrefs, xattrs, extents,
3551                  * so if there are a ton of hard links to an inode there can
3552                  * be a lot of backrefs.  Don't waste time searching too hard,
3553                  * this is just an optimization
3554                  */
3555                 if (scanned >= 8)
3556                         break;
3557         }
3558         /* we hit the end of the leaf before we found an xattr or
3559          * something larger than an xattr.  We have to assume the inode
3560          * has acls
3561          */
3562         if (*first_xattr_slot == -1)
3563                 *first_xattr_slot = slot;
3564         return 1;
3565 }
3566 
3567 /*
3568  * read an inode from the btree into the in-memory inode
3569  */
3570 static int btrfs_read_locked_inode(struct inode *inode)
3571 {
3572         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3573         struct btrfs_path *path;
3574         struct extent_buffer *leaf;
3575         struct btrfs_inode_item *inode_item;
3576         struct btrfs_root *root = BTRFS_I(inode)->root;
3577         struct btrfs_key location;
3578         unsigned long ptr;
3579         int maybe_acls;
3580         u32 rdev;
3581         int ret;
3582         bool filled = false;
3583         int first_xattr_slot;
3584 
3585         ret = btrfs_fill_inode(inode, &rdev);
3586         if (!ret)
3587                 filled = true;
3588 
3589         path = btrfs_alloc_path();
3590         if (!path)
3591                 return -ENOMEM;
3592 
3593         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3594 
3595         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3596         if (ret) {
3597                 btrfs_free_path(path);
3598                 return ret;
3599         }
3600 
3601         leaf = path->nodes[0];
3602 
3603         if (filled)
3604                 goto cache_index;
3605 
3606         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3607                                     struct btrfs_inode_item);
3608         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3609         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3610         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3611         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3612         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3613 
3614         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3615         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3616 
3617         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3618         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3619 
3620         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3621         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3622 
3623         BTRFS_I(inode)->i_otime.tv_sec =
3624                 btrfs_timespec_sec(leaf, &inode_item->otime);
3625         BTRFS_I(inode)->i_otime.tv_nsec =
3626                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3627 
3628         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3629         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3630         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3631 
3632         inode_set_iversion_queried(inode,
3633                                    btrfs_inode_sequence(leaf, inode_item));
3634         inode->i_generation = BTRFS_I(inode)->generation;
3635         inode->i_rdev = 0;
3636         rdev = btrfs_inode_rdev(leaf, inode_item);
3637 
3638         BTRFS_I(inode)->index_cnt = (u64)-1;
3639         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3640 
3641 cache_index:
3642         /*
3643          * If we were modified in the current generation and evicted from memory
3644          * and then re-read we need to do a full sync since we don't have any
3645          * idea about which extents were modified before we were evicted from
3646          * cache.
3647          *
3648          * This is required for both inode re-read from disk and delayed inode
3649          * in delayed_nodes_tree.
3650          */
3651         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3652                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3653                         &BTRFS_I(inode)->runtime_flags);
3654 
3655         /*
3656          * We don't persist the id of the transaction where an unlink operation
3657          * against the inode was last made. So here we assume the inode might
3658          * have been evicted, and therefore the exact value of last_unlink_trans
3659          * lost, and set it to last_trans to avoid metadata inconsistencies
3660          * between the inode and its parent if the inode is fsync'ed and the log
3661          * replayed. For example, in the scenario:
3662          *
3663          * touch mydir/foo
3664          * ln mydir/foo mydir/bar
3665          * sync
3666          * unlink mydir/bar
3667          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3668          * xfs_io -c fsync mydir/foo
3669          * <power failure>
3670          * mount fs, triggers fsync log replay
3671          *
3672          * We must make sure that when we fsync our inode foo we also log its
3673          * parent inode, otherwise after log replay the parent still has the
3674          * dentry with the "bar" name but our inode foo has a link count of 1
3675          * and doesn't have an inode ref with the name "bar" anymore.
3676          *
3677          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3678          * but it guarantees correctness at the expense of occasional full
3679          * transaction commits on fsync if our inode is a directory, or if our
3680          * inode is not a directory, logging its parent unnecessarily.
3681          */
3682         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3683 
3684         path->slots[0]++;
3685         if (inode->i_nlink != 1 ||
3686             path->slots[0] >= btrfs_header_nritems(leaf))
3687                 goto cache_acl;
3688 
3689         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3690         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3691                 goto cache_acl;
3692 
3693         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3694         if (location.type == BTRFS_INODE_REF_KEY) {
3695                 struct btrfs_inode_ref *ref;
3696 
3697                 ref = (struct btrfs_inode_ref *)ptr;
3698                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3699         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3700                 struct btrfs_inode_extref *extref;
3701 
3702                 extref = (struct btrfs_inode_extref *)ptr;
3703                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3704                                                                      extref);
3705         }
3706 cache_acl:
3707         /*
3708          * try to precache a NULL acl entry for files that don't have
3709          * any xattrs or acls
3710          */
3711         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3712                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3713         if (first_xattr_slot != -1) {
3714                 path->slots[0] = first_xattr_slot;
3715                 ret = btrfs_load_inode_props(inode, path);
3716                 if (ret)
3717                         btrfs_err(fs_info,
3718                                   "error loading props for ino %llu (root %llu): %d",
3719                                   btrfs_ino(BTRFS_I(inode)),
3720                                   root->root_key.objectid, ret);
3721         }
3722         btrfs_free_path(path);
3723 
3724         if (!maybe_acls)
3725                 cache_no_acl(inode);
3726 
3727         switch (inode->i_mode & S_IFMT) {
3728         case S_IFREG:
3729                 inode->i_mapping->a_ops = &btrfs_aops;
3730                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3731                 inode->i_fop = &btrfs_file_operations;
3732                 inode->i_op = &btrfs_file_inode_operations;
3733                 break;
3734         case S_IFDIR:
3735                 inode->i_fop = &btrfs_dir_file_operations;
3736                 inode->i_op = &btrfs_dir_inode_operations;
3737                 break;
3738         case S_IFLNK:
3739                 inode->i_op = &btrfs_symlink_inode_operations;
3740                 inode_nohighmem(inode);
3741                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3742                 break;
3743         default:
3744                 inode->i_op = &btrfs_special_inode_operations;
3745                 init_special_inode(inode, inode->i_mode, rdev);
3746                 break;
3747         }
3748 
3749         btrfs_sync_inode_flags_to_i_flags(inode);
3750         return 0;
3751 }
3752 
3753 /*
3754  * given a leaf and an inode, copy the inode fields into the leaf
3755  */
3756 static void fill_inode_item(struct btrfs_trans_handle *trans,
3757                             struct extent_buffer *leaf,
3758                             struct btrfs_inode_item *item,
3759                             struct inode *inode)
3760 {
3761         struct btrfs_map_token token;
3762 
3763         btrfs_init_map_token(&token);
3764 
3765         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3766         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3767         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3768                                    &token);
3769         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3770         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3771 
3772         btrfs_set_token_timespec_sec(leaf, &item->atime,
3773                                      inode->i_atime.tv_sec, &token);
3774         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3775                                       inode->i_atime.tv_nsec, &token);
3776 
3777         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3778                                      inode->i_mtime.tv_sec, &token);
3779         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3780                                       inode->i_mtime.tv_nsec, &token);
3781 
3782         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3783                                      inode->i_ctime.tv_sec, &token);
3784         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3785                                       inode->i_ctime.tv_nsec, &token);
3786 
3787         btrfs_set_token_timespec_sec(leaf, &item->otime,
3788                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3789         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3790                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3791 
3792         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3793                                      &token);
3794         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3795                                          &token);
3796         btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
3797                                        &token);
3798         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3799         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3800         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3801         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3802 }
3803 
3804 /*
3805  * copy everything in the in-memory inode into the btree.
3806  */
3807 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3808                                 struct btrfs_root *root, struct inode *inode)
3809 {
3810         struct btrfs_inode_item *inode_item;
3811         struct btrfs_path *path;
3812         struct extent_buffer *leaf;
3813         int ret;
3814 
3815         path = btrfs_alloc_path();
3816         if (!path)
3817                 return -ENOMEM;
3818 
3819         path->leave_spinning = 1;
3820         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3821                                  1);
3822         if (ret) {
3823                 if (ret > 0)
3824                         ret = -ENOENT;
3825                 goto failed;
3826         }
3827 
3828         leaf = path->nodes[0];
3829         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3830                                     struct btrfs_inode_item);
3831 
3832         fill_inode_item(trans, leaf, inode_item, inode);
3833         btrfs_mark_buffer_dirty(leaf);
3834         btrfs_set_inode_last_trans(trans, inode);
3835         ret = 0;
3836 failed:
3837         btrfs_free_path(path);
3838         return ret;
3839 }
3840 
3841 /*
3842  * copy everything in the in-memory inode into the btree.
3843  */
3844 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3845                                 struct btrfs_root *root, struct inode *inode)
3846 {
3847         struct btrfs_fs_info *fs_info = root->fs_info;
3848         int ret;
3849 
3850         /*
3851          * If the inode is a free space inode, we can deadlock during commit
3852          * if we put it into the delayed code.
3853          *
3854          * The data relocation inode should also be directly updated
3855          * without delay
3856          */
3857         if (!btrfs_is_free_space_inode(BTRFS_I(inode))
3858             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3859             && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3860                 btrfs_update_root_times(trans, root);
3861 
3862                 ret = btrfs_delayed_update_inode(trans, root, inode);
3863                 if (!ret)
3864                         btrfs_set_inode_last_trans(trans, inode);
3865                 return ret;
3866         }
3867 
3868         return btrfs_update_inode_item(trans, root, inode);
3869 }
3870 
3871 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3872                                          struct btrfs_root *root,
3873                                          struct inode *inode)
3874 {
3875         int ret;
3876 
3877         ret = btrfs_update_inode(trans, root, inode);
3878         if (ret == -ENOSPC)
3879                 return btrfs_update_inode_item(trans, root, inode);
3880         return ret;
3881 }
3882 
3883 /*
3884  * unlink helper that gets used here in inode.c and in the tree logging
3885  * recovery code.  It remove a link in a directory with a given name, and
3886  * also drops the back refs in the inode to the directory
3887  */
3888 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3889                                 struct btrfs_root *root,
3890                                 struct btrfs_inode *dir,
3891                                 struct btrfs_inode *inode,
3892                                 const char *name, int name_len)
3893 {
3894         struct btrfs_fs_info *fs_info = root->fs_info;
3895         struct btrfs_path *path;
3896         int ret = 0;
3897         struct extent_buffer *leaf;
3898         struct btrfs_dir_item *di;
3899         struct btrfs_key key;
3900         u64 index;
3901         u64 ino = btrfs_ino(inode);
3902         u64 dir_ino = btrfs_ino(dir);
3903 
3904         path = btrfs_alloc_path();
3905         if (!path) {
3906                 ret = -ENOMEM;
3907                 goto out;
3908         }
3909 
3910         path->leave_spinning = 1;
3911         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3912                                     name, name_len, -1);
3913         if (IS_ERR(di)) {
3914                 ret = PTR_ERR(di);
3915                 goto err;
3916         }
3917         if (!di) {
3918                 ret = -ENOENT;
3919                 goto err;
3920         }
3921         leaf = path->nodes[0];
3922         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3923         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3924         if (ret)
3925                 goto err;
3926         btrfs_release_path(path);
3927 
3928         /*
3929          * If we don't have dir index, we have to get it by looking up
3930          * the inode ref, since we get the inode ref, remove it directly,
3931          * it is unnecessary to do delayed deletion.
3932          *
3933          * But if we have dir index, needn't search inode ref to get it.
3934          * Since the inode ref is close to the inode item, it is better
3935          * that we delay to delete it, and just do this deletion when
3936          * we update the inode item.
3937          */
3938         if (inode->dir_index) {
3939                 ret = btrfs_delayed_delete_inode_ref(inode);
3940                 if (!ret) {
3941                         index = inode->dir_index;
3942                         goto skip_backref;
3943                 }
3944         }
3945 
3946         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3947                                   dir_ino, &index);
3948         if (ret) {
3949                 btrfs_info(fs_info,
3950                         "failed to delete reference to %.*s, inode %llu parent %llu",
3951                         name_len, name, ino, dir_ino);
3952                 btrfs_abort_transaction(trans, ret);
3953                 goto err;
3954         }
3955 skip_backref:
3956         ret = btrfs_delete_delayed_dir_index(trans, dir, index);
3957         if (ret) {
3958                 btrfs_abort_transaction(trans, ret);
3959                 goto err;
3960         }
3961 
3962         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3963                         dir_ino);
3964         if (ret != 0 && ret != -ENOENT) {
3965                 btrfs_abort_transaction(trans, ret);
3966                 goto err;
3967         }
3968 
3969         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3970                         index);
3971         if (ret == -ENOENT)
3972                 ret = 0;
3973         else if (ret)
3974                 btrfs_abort_transaction(trans, ret);
3975 err:
3976         btrfs_free_path(path);
3977         if (ret)
3978                 goto out;
3979 
3980         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
3981         inode_inc_iversion(&inode->vfs_inode);
3982         inode_inc_iversion(&dir->vfs_inode);
3983         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
3984                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
3985         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
3986 out:
3987         return ret;
3988 }
3989 
3990 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3991                        struct btrfs_root *root,
3992                        struct btrfs_inode *dir, struct btrfs_inode *inode,
3993                        const char *name, int name_len)
3994 {
3995         int ret;
3996         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3997         if (!ret) {
3998                 drop_nlink(&inode->vfs_inode);
3999                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4000         }
4001         return ret;
4002 }
4003 
4004 /*
4005  * helper to start transaction for unlink and rmdir.
4006  *
4007  * unlink and rmdir are special in btrfs, they do not always free space, so
4008  * if we cannot make our reservations the normal way try and see if there is
4009  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4010  * allow the unlink to occur.
4011  */
4012 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4013 {
4014         struct btrfs_root *root = BTRFS_I(dir)->root;
4015 
4016         /*
4017          * 1 for the possible orphan item
4018          * 1 for the dir item
4019          * 1 for the dir index
4020          * 1 for the inode ref
4021          * 1 for the inode
4022          */
4023         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4024 }
4025 
4026 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4027 {
4028         struct btrfs_root *root = BTRFS_I(dir)->root;
4029         struct btrfs_trans_handle *trans;
4030         struct inode *inode = d_inode(dentry);
4031         int ret;
4032 
4033         trans = __unlink_start_trans(dir);
4034         if (IS_ERR(trans))
4035                 return PTR_ERR(trans);
4036 
4037         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4038                         0);
4039 
4040         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4041                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4042                         dentry->d_name.len);
4043         if (ret)
4044                 goto out;
4045 
4046         if (inode->i_nlink == 0) {
4047                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4048                 if (ret)
4049                         goto out;
4050         }
4051 
4052 out:
4053         btrfs_end_transaction(trans);
4054         btrfs_btree_balance_dirty(root->fs_info);
4055         return ret;
4056 }
4057 
4058 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4059                                struct inode *dir, u64 objectid,
4060                                const char *name, int name_len)
4061 {
4062         struct btrfs_root *root = BTRFS_I(dir)->root;
4063         struct btrfs_path *path;
4064         struct extent_buffer *leaf;
4065         struct btrfs_dir_item *di;
4066         struct btrfs_key key;
4067         u64 index;
4068         int ret;
4069         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4070 
4071         path = btrfs_alloc_path();
4072         if (!path)
4073                 return -ENOMEM;
4074 
4075         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4076                                    name, name_len, -1);
4077         if (IS_ERR_OR_NULL(di)) {
4078                 if (!di)
4079                         ret = -ENOENT;
4080                 else
4081                         ret = PTR_ERR(di);
4082                 goto out;
4083         }
4084 
4085         leaf = path->nodes[0];
4086         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4087         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4088         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4089         if (ret) {
4090                 btrfs_abort_transaction(trans, ret);
4091                 goto out;
4092         }
4093         btrfs_release_path(path);
4094 
4095         ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
4096                                  dir_ino, &index, name, name_len);
4097         if (ret < 0) {
4098                 if (ret != -ENOENT) {
4099                         btrfs_abort_transaction(trans, ret);
4100                         goto out;
4101                 }
4102                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4103                                                  name, name_len);
4104                 if (IS_ERR_OR_NULL(di)) {
4105                         if (!di)
4106                                 ret = -ENOENT;
4107                         else
4108                                 ret = PTR_ERR(di);
4109                         btrfs_abort_transaction(trans, ret);
4110                         goto out;
4111                 }
4112 
4113                 leaf = path->nodes[0];
4114                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4115                 index = key.offset;
4116         }
4117         btrfs_release_path(path);
4118 
4119         ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4120         if (ret) {
4121                 btrfs_abort_transaction(trans, ret);
4122                 goto out;
4123         }
4124 
4125         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4126         inode_inc_iversion(dir);
4127         dir->i_mtime = dir->i_ctime = current_time(dir);
4128         ret = btrfs_update_inode_fallback(trans, root, dir);
4129         if (ret)
4130                 btrfs_abort_transaction(trans, ret);
4131 out:
4132         btrfs_free_path(path);
4133         return ret;
4134 }
4135 
4136 /*
4137  * Helper to check if the subvolume references other subvolumes or if it's
4138  * default.
4139  */
4140 static noinline int may_destroy_subvol(struct btrfs_root *root)
4141 {
4142         struct btrfs_fs_info *fs_info = root->fs_info;
4143         struct btrfs_path *path;
4144         struct btrfs_dir_item *di;
4145         struct btrfs_key key;
4146         u64 dir_id;
4147         int ret;
4148 
4149         path = btrfs_alloc_path();
4150         if (!path)
4151                 return -ENOMEM;
4152 
4153         /* Make sure this root isn't set as the default subvol */
4154         dir_id = btrfs_super_root_dir(fs_info->super_copy);
4155         di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4156                                    dir_id, "default", 7, 0);
4157         if (di && !IS_ERR(di)) {
4158                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4159                 if (key.objectid == root->root_key.objectid) {
4160                         ret = -EPERM;
4161                         btrfs_err(fs_info,
4162                                   "deleting default subvolume %llu is not allowed",
4163                                   key.objectid);
4164                         goto out;
4165                 }
4166                 btrfs_release_path(path);
4167         }
4168 
4169         key.objectid = root->root_key.objectid;
4170         key.type = BTRFS_ROOT_REF_KEY;
4171         key.offset = (u64)-1;
4172 
4173         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4174         if (ret < 0)
4175                 goto out;
4176         BUG_ON(ret == 0);
4177 
4178         ret = 0;
4179         if (path->slots[0] > 0) {
4180                 path->slots[0]--;
4181                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4182                 if (key.objectid == root->root_key.objectid &&
4183                     key.type == BTRFS_ROOT_REF_KEY)
4184                         ret = -ENOTEMPTY;
4185         }
4186 out:
4187         btrfs_free_path(path);
4188         return ret;
4189 }
4190 
4191 /* Delete all dentries for inodes belonging to the root */
4192 static void btrfs_prune_dentries(struct btrfs_root *root)
4193 {
4194         struct btrfs_fs_info *fs_info = root->fs_info;
4195         struct rb_node *node;
4196         struct rb_node *prev;
4197         struct btrfs_inode *entry;
4198         struct inode *inode;
4199         u64 objectid = 0;
4200 
4201         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4202                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4203 
4204         spin_lock(&root->inode_lock);
4205 again:
4206         node = root->inode_tree.rb_node;
4207         prev = NULL;
4208         while (node) {
4209                 prev = node;
4210                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4211 
4212                 if (objectid < btrfs_ino(entry))
4213                         node = node->rb_left;
4214                 else if (objectid > btrfs_ino(entry))
4215                         node = node->rb_right;
4216                 else
4217                         break;
4218         }
4219         if (!node) {
4220                 while (prev) {
4221                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
4222                         if (objectid <= btrfs_ino(entry)) {
4223                                 node = prev;
4224                                 break;
4225                         }
4226                         prev = rb_next(prev);
4227                 }
4228         }
4229         while (node) {
4230                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4231                 objectid = btrfs_ino(entry) + 1;
4232                 inode = igrab(&entry->vfs_inode);
4233                 if (inode) {
4234                         spin_unlock(&root->inode_lock);
4235                         if (atomic_read(&inode->i_count) > 1)
4236                                 d_prune_aliases(inode);
4237                         /*
4238                          * btrfs_drop_inode will have it removed from the inode
4239                          * cache when its usage count hits zero.
4240                          */
4241                         iput(inode);
4242                         cond_resched();
4243                         spin_lock(&root->inode_lock);
4244                         goto again;
4245                 }
4246 
4247                 if (cond_resched_lock(&root->inode_lock))
4248                         goto again;
4249 
4250                 node = rb_next(node);
4251         }
4252         spin_unlock(&root->inode_lock);
4253 }
4254 
4255 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4256 {
4257         struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4258         struct btrfs_root *root = BTRFS_I(dir)->root;
4259         struct inode *inode = d_inode(dentry);
4260         struct btrfs_root *dest = BTRFS_I(inode)->root;
4261         struct btrfs_trans_handle *trans;
4262         struct btrfs_block_rsv block_rsv;
4263         u64 root_flags;
4264         int ret;
4265         int err;
4266 
4267         /*
4268          * Don't allow to delete a subvolume with send in progress. This is
4269          * inside the inode lock so the error handling that has to drop the bit
4270          * again is not run concurrently.
4271          */
4272         spin_lock(&dest->root_item_lock);
4273         root_flags = btrfs_root_flags(&dest->root_item);
4274         if (dest->send_in_progress == 0) {
4275                 btrfs_set_root_flags(&dest->root_item,
4276                                 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4277                 spin_unlock(&dest->root_item_lock);
4278         } else {
4279                 spin_unlock(&dest->root_item_lock);
4280                 btrfs_warn(fs_info,
4281                            "attempt to delete subvolume %llu during send",
4282                            dest->root_key.objectid);
4283                 return -EPERM;
4284         }
4285 
4286         down_write(&fs_info->subvol_sem);
4287 
4288         err = may_destroy_subvol(dest);
4289         if (err)
4290                 goto out_up_write;
4291 
4292         btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4293         /*
4294          * One for dir inode,
4295          * two for dir entries,
4296          * two for root ref/backref.
4297          */
4298         err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4299         if (err)
4300                 goto out_up_write;
4301 
4302         trans = btrfs_start_transaction(root, 0);
4303         if (IS_ERR(trans)) {
4304                 err = PTR_ERR(trans);
4305                 goto out_release;
4306         }
4307         trans->block_rsv = &block_rsv;
4308         trans->bytes_reserved = block_rsv.size;
4309 
4310         btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4311 
4312         ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
4313                                   dentry->d_name.name, dentry->d_name.len);
4314         if (ret) {
4315                 err = ret;
4316                 btrfs_abort_transaction(trans, ret);
4317                 goto out_end_trans;
4318         }
4319 
4320         btrfs_record_root_in_trans(trans, dest);
4321 
4322         memset(&dest->root_item.drop_progress, 0,
4323                 sizeof(dest->root_item.drop_progress));
4324         dest->root_item.drop_level = 0;
4325         btrfs_set_root_refs(&dest->root_item, 0);
4326 
4327         if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4328                 ret = btrfs_insert_orphan_item(trans,
4329                                         fs_info->tree_root,
4330                                         dest->root_key.objectid);
4331                 if (ret) {
4332                         btrfs_abort_transaction(trans, ret);
4333                         err = ret;
4334                         goto out_end_trans;
4335                 }
4336         }
4337 
4338         ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4339                                   BTRFS_UUID_KEY_SUBVOL,
4340                                   dest->root_key.objectid);
4341         if (ret && ret != -ENOENT) {
4342                 btrfs_abort_transaction(trans, ret);
4343                 err = ret;
4344                 goto out_end_trans;
4345         }
4346         if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4347                 ret = btrfs_uuid_tree_remove(trans,
4348                                           dest->root_item.received_uuid,
4349                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4350                                           dest->root_key.objectid);
4351                 if (ret && ret != -ENOENT) {
4352                         btrfs_abort_transaction(trans, ret);
4353                         err = ret;
4354                         goto out_end_trans;
4355                 }
4356         }
4357 
4358 out_end_trans:
4359         trans->block_rsv = NULL;
4360         trans->bytes_reserved = 0;
4361         ret = btrfs_end_transaction(trans);
4362         if (ret && !err)
4363                 err = ret;
4364         inode->i_flags |= S_DEAD;
4365 out_release:
4366         btrfs_subvolume_release_metadata(fs_info, &block_rsv);
4367 out_up_write:
4368         up_write(&fs_info->subvol_sem);
4369         if (err) {
4370                 spin_lock(&dest->root_item_lock);
4371                 root_flags = btrfs_root_flags(&dest->root_item);
4372                 btrfs_set_root_flags(&dest->root_item,
4373                                 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4374                 spin_unlock(&dest->root_item_lock);
4375         } else {
4376                 d_invalidate(dentry);
4377                 btrfs_prune_dentries(dest);
4378                 ASSERT(dest->send_in_progress == 0);
4379 
4380                 /* the last ref */
4381                 if (dest->ino_cache_inode) {
4382                         iput(dest->ino_cache_inode);
4383                         dest->ino_cache_inode = NULL;
4384                 }
4385         }
4386 
4387         return err;
4388 }
4389 
4390 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4391 {
4392         struct inode *inode = d_inode(dentry);
4393         int err = 0;
4394         struct btrfs_root *root = BTRFS_I(dir)->root;
4395         struct btrfs_trans_handle *trans;
4396         u64 last_unlink_trans;
4397 
4398         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4399                 return -ENOTEMPTY;
4400         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4401                 return btrfs_delete_subvolume(dir, dentry);
4402 
4403         trans = __unlink_start_trans(dir);
4404         if (IS_ERR(trans))
4405                 return PTR_ERR(trans);
4406 
4407         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4408                 err = btrfs_unlink_subvol(trans, dir,
4409                                           BTRFS_I(inode)->location.objectid,
4410                                           dentry->d_name.name,
4411                                           dentry->d_name.len);
4412                 goto out;
4413         }
4414 
4415         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4416         if (err)
4417                 goto out;
4418 
4419         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4420 
4421         /* now the directory is empty */
4422         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4423                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4424                         dentry->d_name.len);
4425         if (!err) {
4426                 btrfs_i_size_write(BTRFS_I(inode), 0);
4427                 /*
4428                  * Propagate the last_unlink_trans value of the deleted dir to
4429                  * its parent directory. This is to prevent an unrecoverable
4430                  * log tree in the case we do something like this:
4431                  * 1) create dir foo
4432                  * 2) create snapshot under dir foo
4433                  * 3) delete the snapshot
4434                  * 4) rmdir foo
4435                  * 5) mkdir foo
4436                  * 6) fsync foo or some file inside foo
4437                  */
4438                 if (last_unlink_trans >= trans->transid)
4439                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4440         }
4441 out:
4442         btrfs_end_transaction(trans);
4443         btrfs_btree_balance_dirty(root->fs_info);
4444 
4445         return err;
4446 }
4447 
4448 static int truncate_space_check(struct btrfs_trans_handle *trans,
4449                                 struct btrfs_root *root,
4450                                 u64 bytes_deleted)
4451 {
4452         struct btrfs_fs_info *fs_info = root->fs_info;
4453         int ret;
4454 
4455         /*
4456          * This is only used to apply pressure to the enospc system, we don't
4457          * intend to use this reservation at all.
4458          */
4459         bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4460         bytes_deleted *= fs_info->nodesize;
4461         ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4462                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4463         if (!ret) {
4464                 trace_btrfs_space_reservation(fs_info, "transaction",
4465                                               trans->transid,
4466                                               bytes_deleted, 1);
4467                 trans->bytes_reserved += bytes_deleted;
4468         }
4469         return ret;
4470 
4471 }
4472 
4473 /*
4474  * Return this if we need to call truncate_block for the last bit of the
4475  * truncate.
4476  */
4477 #define NEED_TRUNCATE_BLOCK 1
4478 
4479 /*
4480  * this can truncate away extent items, csum items and directory items.
4481  * It starts at a high offset and removes keys until it can't find
4482  * any higher than new_size
4483  *
4484  * csum items that cross the new i_size are truncated to the new size
4485  * as well.
4486  *
4487  * min_type is the minimum key type to truncate down to.  If set to 0, this
4488  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4489  */
4490 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4491                                struct btrfs_root *root,
4492                                struct inode *inode,
4493                                u64 new_size, u32 min_type)
4494 {
4495         struct btrfs_fs_info *fs_info = root->fs_info;
4496         struct btrfs_path *path;
4497         struct extent_buffer *leaf;
4498         struct btrfs_file_extent_item *fi;
4499         struct btrfs_key key;
4500         struct btrfs_key found_key;
4501         u64 extent_start = 0;
4502         u64 extent_num_bytes = 0;
4503         u64 extent_offset = 0;
4504         u64 item_end = 0;
4505         u64 last_size = new_size;
4506         u32 found_type = (u8)-1;
4507         int found_extent;
4508         int del_item;
4509         int pending_del_nr = 0;
4510         int pending_del_slot = 0;
4511         int extent_type = -1;
4512         int ret;
4513         u64 ino = btrfs_ino(BTRFS_I(inode));
4514         u64 bytes_deleted = 0;
4515         bool be_nice = false;
4516         bool should_throttle = false;
4517         bool should_end = false;
4518 
4519         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4520 
4521         /*
4522          * for non-free space inodes and ref cows, we want to back off from
4523          * time to time
4524          */
4525         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4526             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4527                 be_nice = true;
4528 
4529         path = btrfs_alloc_path();
4530         if (!path)
4531                 return -ENOMEM;
4532         path->reada = READA_BACK;
4533 
4534         /*
4535          * We want to drop from the next block forward in case this new size is
4536          * not block aligned since we will be keeping the last block of the
4537          * extent just the way it is.
4538          */
4539         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4540             root == fs_info->tree_root)
4541                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4542                                         fs_info->sectorsize),
4543                                         (u64)-1, 0);
4544 
4545         /*
4546          * This function is also used to drop the items in the log tree before
4547          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4548          * it is used to drop the loged items. So we shouldn't kill the delayed
4549          * items.
4550          */
4551         if (min_type == 0 && root == BTRFS_I(inode)->root)
4552                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4553 
4554         key.objectid = ino;
4555         key.offset = (u64)-1;
4556         key.type = (u8)-1;
4557 
4558 search_again:
4559         /*
4560          * with a 16K leaf size and 128MB extents, you can actually queue
4561          * up a huge file in a single leaf.  Most of the time that
4562          * bytes_deleted is > 0, it will be huge by the time we get here
4563          */
4564         if (be_nice && bytes_deleted > SZ_32M &&
4565             btrfs_should_end_transaction(trans)) {
4566                 ret = -EAGAIN;
4567                 goto out;
4568         }
4569 
4570         path->leave_spinning = 1;
4571         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4572         if (ret < 0)
4573                 goto out;
4574 
4575         if (ret > 0) {
4576                 ret = 0;
4577                 /* there are no items in the tree for us to truncate, we're
4578                  * done
4579                  */
4580                 if (path->slots[0] == 0)
4581                         goto out;
4582                 path->slots[0]--;
4583         }
4584 
4585         while (1) {
4586                 fi = NULL;
4587                 leaf = path->nodes[0];
4588                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4589                 found_type = found_key.type;
4590 
4591                 if (found_key.objectid != ino)
4592                         break;
4593 
4594                 if (found_type < min_type)
4595                         break;
4596 
4597                 item_end = found_key.offset;
4598                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4599                         fi = btrfs_item_ptr(leaf, path->slots[0],
4600                                             struct btrfs_file_extent_item);
4601                         extent_type = btrfs_file_extent_type(leaf, fi);
4602                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4603                                 item_end +=
4604                                     btrfs_file_extent_num_bytes(leaf, fi);
4605 
4606                                 trace_btrfs_truncate_show_fi_regular(
4607                                         BTRFS_I(inode), leaf, fi,
4608                                         found_key.offset);
4609                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4610                                 item_end += btrfs_file_extent_ram_bytes(leaf,
4611                                                                         fi);
4612 
4613                                 trace_btrfs_truncate_show_fi_inline(
4614                                         BTRFS_I(inode), leaf, fi, path->slots[0],
4615                                         found_key.offset);
4616                         }
4617                         item_end--;
4618                 }
4619                 if (found_type > min_type) {
4620                         del_item = 1;
4621                 } else {
4622                         if (item_end < new_size)
4623                                 break;
4624                         if (found_key.offset >= new_size)
4625                                 del_item = 1;
4626                         else
4627                                 del_item = 0;
4628                 }
4629                 found_extent = 0;
4630                 /* FIXME, shrink the extent if the ref count is only 1 */
4631                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4632                         goto delete;
4633 
4634                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4635                         u64 num_dec;
4636                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4637                         if (!del_item) {
4638                                 u64 orig_num_bytes =
4639                                         btrfs_file_extent_num_bytes(leaf, fi);
4640                                 extent_num_bytes = ALIGN(new_size -
4641                                                 found_key.offset,
4642                                                 fs_info->sectorsize);
4643                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4644                                                          extent_num_bytes);
4645                                 num_dec = (orig_num_bytes -
4646                                            extent_num_bytes);
4647                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4648                                              &root->state) &&
4649                                     extent_start != 0)
4650                                         inode_sub_bytes(inode, num_dec);
4651                                 btrfs_mark_buffer_dirty(leaf);
4652                         } else {
4653                                 extent_num_bytes =
4654                                         btrfs_file_extent_disk_num_bytes(leaf,
4655                                                                          fi);
4656                                 extent_offset = found_key.offset -
4657                                         btrfs_file_extent_offset(leaf, fi);
4658 
4659                                 /* FIXME blocksize != 4096 */
4660                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4661                                 if (extent_start != 0) {
4662                                         found_extent = 1;
4663                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4664                                                      &root->state))
4665                                                 inode_sub_bytes(inode, num_dec);
4666                                 }
4667                         }
4668                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4669                         /*
4670                          * we can't truncate inline items that have had
4671                          * special encodings
4672                          */
4673                         if (!del_item &&
4674                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4675                             btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4676                             btrfs_file_extent_compression(leaf, fi) == 0) {
4677                                 u32 size = (u32)(new_size - found_key.offset);
4678 
4679                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4680                                 size = btrfs_file_extent_calc_inline_size(size);
4681                                 btrfs_truncate_item(root->fs_info, path, size, 1);
4682                         } else if (!del_item) {
4683                                 /*
4684                                  * We have to bail so the last_size is set to
4685                                  * just before this extent.
4686                                  */
4687                                 ret = NEED_TRUNCATE_BLOCK;
4688                                 break;
4689                         }
4690 
4691                         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4692                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4693                 }
4694 delete:
4695                 if (del_item)
4696                         last_size = found_key.offset;
4697                 else
4698                         last_size = new_size;
4699                 if (del_item) {
4700                         if (!pending_del_nr) {
4701                                 /* no pending yet, add ourselves */
4702                                 pending_del_slot = path->slots[0];
4703                                 pending_del_nr = 1;
4704                         } else if (pending_del_nr &&
4705                                    path->slots[0] + 1 == pending_del_slot) {
4706                                 /* hop on the pending chunk */
4707                                 pending_del_nr++;
4708                                 pending_del_slot = path->slots[0];
4709                         } else {
4710                                 BUG();
4711                         }
4712                 } else {
4713                         break;
4714                 }
4715                 should_throttle = false;
4716 
4717                 if (found_extent &&
4718                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4719                      root == fs_info->tree_root)) {
4720                         btrfs_set_path_blocking(path);
4721                         bytes_deleted += extent_num_bytes;
4722                         ret = btrfs_free_extent(trans, root, extent_start,
4723                                                 extent_num_bytes, 0,
4724                                                 btrfs_header_owner(leaf),
4725                                                 ino, extent_offset);
4726                         if (ret) {
4727                                 btrfs_abort_transaction(trans, ret);
4728                                 break;
4729                         }
4730                         if (btrfs_should_throttle_delayed_refs(trans, fs_info))
4731                                 btrfs_async_run_delayed_refs(fs_info,
4732                                         trans->delayed_ref_updates * 2,
4733                                         trans->transid, 0);
4734                         if (be_nice) {
4735                                 if (truncate_space_check(trans, root,
4736                                                          extent_num_bytes)) {
4737                                         should_end = true;
4738                                 }
4739                                 if (btrfs_should_throttle_delayed_refs(trans,
4740                                                                        fs_info))
4741                                         should_throttle = true;
4742                         }
4743                 }
4744 
4745                 if (found_type == BTRFS_INODE_ITEM_KEY)
4746                         break;
4747 
4748                 if (path->slots[0] == 0 ||
4749                     path->slots[0] != pending_del_slot ||
4750                     should_throttle || should_end) {
4751                         if (pending_del_nr) {
4752                                 ret = btrfs_del_items(trans, root, path,
4753                                                 pending_del_slot,
4754                                                 pending_del_nr);
4755                                 if (ret) {
4756                                         btrfs_abort_transaction(trans, ret);
4757                                         break;
4758                                 }
4759                                 pending_del_nr = 0;
4760                         }
4761                         btrfs_release_path(path);
4762                         if (should_throttle) {
4763                                 unsigned long updates = trans->delayed_ref_updates;
4764                                 if (updates) {
4765                                         trans->delayed_ref_updates = 0;
4766                                         ret = btrfs_run_delayed_refs(trans,
4767                                                                    updates * 2);
4768                                         if (ret)
4769                                                 break;
4770                                 }
4771                         }
4772                         /*
4773                          * if we failed to refill our space rsv, bail out
4774                          * and let the transaction restart
4775                          */
4776                         if (should_end) {
4777                                 ret = -EAGAIN;
4778                                 break;
4779                         }
4780                         goto search_again;
4781                 } else {
4782                         path->slots[0]--;
4783                 }
4784         }
4785 out:
4786         if (ret >= 0 && pending_del_nr) {
4787                 int err;
4788 
4789                 err = btrfs_del_items(trans, root, path, pending_del_slot,
4790                                       pending_del_nr);
4791                 if (err) {
4792                         btrfs_abort_transaction(trans, err);
4793                         ret = err;
4794                 }
4795         }
4796         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4797                 ASSERT(last_size >= new_size);
4798                 if (!ret && last_size > new_size)
4799                         last_size = new_size;
4800                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4801         }
4802 
4803         btrfs_free_path(path);
4804 
4805         if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
4806                 unsigned long updates = trans->delayed_ref_updates;
4807                 int err;
4808 
4809                 if (updates) {
4810                         trans->delayed_ref_updates = 0;
4811                         err = btrfs_run_delayed_refs(trans, updates * 2);
4812                         if (err)
4813                                 ret = err;
4814                 }
4815         }
4816         return ret;
4817 }
4818 
4819 /*
4820  * btrfs_truncate_block - read, zero a chunk and write a block
4821  * @inode - inode that we're zeroing
4822  * @from - the offset to start zeroing
4823  * @len - the length to zero, 0 to zero the entire range respective to the
4824  *      offset
4825  * @front - zero up to the offset instead of from the offset on
4826  *
4827  * This will find the block for the "from" offset and cow the block and zero the
4828  * part we want to zero.  This is used with truncate and hole punching.
4829  */
4830 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4831                         int front)
4832 {
4833         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4834         struct address_space *mapping = inode->i_mapping;
4835         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4836         struct btrfs_ordered_extent *ordered;
4837         struct extent_state *cached_state = NULL;
4838         struct extent_changeset *data_reserved = NULL;
4839         char *kaddr;
4840         u32 blocksize = fs_info->sectorsize;
4841         pgoff_t index = from >> PAGE_SHIFT;
4842         unsigned offset = from & (blocksize - 1);
4843         struct page *page;
4844         gfp_t mask = btrfs_alloc_write_mask(mapping);
4845         int ret = 0;
4846         u64 block_start;
4847         u64 block_end;
4848 
4849         if (IS_ALIGNED(offset, blocksize) &&
4850             (!len || IS_ALIGNED(len, blocksize)))
4851                 goto out;
4852 
4853         block_start = round_down(from, blocksize);
4854         block_end = block_start + blocksize - 1;
4855 
4856         ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4857                                            block_start, blocksize);
4858         if (ret)
4859                 goto out;
4860 
4861 again:
4862         page = find_or_create_page(mapping, index, mask);
4863         if (!page) {
4864                 btrfs_delalloc_release_space(inode, data_reserved,
4865                                              block_start, blocksize, true);
4866                 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
4867                 ret = -ENOMEM;
4868                 goto out;
4869         }
4870 
4871         if (!PageUptodate(page)) {
4872                 ret = btrfs_readpage(NULL, page);
4873                 lock_page(page);
4874                 if (page->mapping != mapping) {
4875                         unlock_page(page);
4876                         put_page(page);
4877                         goto again;
4878                 }
4879                 if (!PageUptodate(page)) {
4880                         ret = -EIO;
4881                         goto out_unlock;
4882                 }
4883         }
4884         wait_on_page_writeback(page);
4885 
4886         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4887         set_page_extent_mapped(page);
4888 
4889         ordered = btrfs_lookup_ordered_extent(inode, block_start);
4890         if (ordered) {
4891                 unlock_extent_cached(io_tree, block_start, block_end,
4892                                      &cached_state);
4893                 unlock_page(page);
4894                 put_page(page);
4895                 btrfs_start_ordered_extent(inode, ordered, 1);
4896                 btrfs_put_ordered_extent(ordered);
4897                 goto again;
4898         }
4899 
4900         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4901                           EXTENT_DIRTY | EXTENT_DELALLOC |
4902                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4903                           0, 0, &cached_state);
4904 
4905         ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4906                                         &cached_state, 0);
4907         if (ret) {
4908                 unlock_extent_cached(io_tree, block_start, block_end,
4909                                      &cached_state);
4910                 goto out_unlock;
4911         }
4912 
4913         if (offset != blocksize) {
4914                 if (!len)
4915                         len = blocksize - offset;
4916                 kaddr = kmap(page);
4917                 if (front)
4918                         memset(kaddr + (block_start - page_offset(page)),
4919                                 0, offset);
4920                 else
4921                         memset(kaddr + (block_start - page_offset(page)) +  offset,
4922                                 0, len);
4923                 flush_dcache_page(page);
4924                 kunmap(page);
4925         }
4926         ClearPageChecked(page);
4927         set_page_dirty(page);
4928         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4929 
4930 out_unlock:
4931         if (ret)
4932                 btrfs_delalloc_release_space(inode, data_reserved, block_start,
4933                                              blocksize, true);
4934         btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
4935         unlock_page(page);
4936         put_page(page);
4937 out:
4938         extent_changeset_free(data_reserved);
4939         return ret;
4940 }
4941 
4942 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4943                              u64 offset, u64 len)
4944 {
4945         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4946         struct btrfs_trans_handle *trans;
4947         int ret;
4948 
4949         /*
4950          * Still need to make sure the inode looks like it's been updated so
4951          * that any holes get logged if we fsync.
4952          */
4953         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4954                 BTRFS_I(inode)->last_trans = fs_info->generation;
4955                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4956                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4957                 return 0;
4958         }
4959 
4960         /*
4961          * 1 - for the one we're dropping
4962          * 1 - for the one we're adding
4963          * 1 - for updating the inode.
4964          */
4965         trans = btrfs_start_transaction(root, 3);
4966         if (IS_ERR(trans))
4967                 return PTR_ERR(trans);
4968 
4969         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4970         if (ret) {
4971                 btrfs_abort_transaction(trans, ret);
4972                 btrfs_end_transaction(trans);
4973                 return ret;
4974         }
4975 
4976         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
4977                         offset, 0, 0, len, 0, len, 0, 0, 0);
4978         if (ret)
4979                 btrfs_abort_transaction(trans, ret);
4980         else
4981                 btrfs_update_inode(trans, root, inode);
4982         btrfs_end_transaction(trans);
4983         return ret;
4984 }
4985 
4986 /*
4987  * This function puts in dummy file extents for the area we're creating a hole
4988  * for.  So if we are truncating this file to a larger size we need to insert
4989  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4990  * the range between oldsize and size
4991  */
4992 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4993 {
4994         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4995         struct btrfs_root *root = BTRFS_I(inode)->root;
4996         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4997         struct extent_map *em = NULL;
4998         struct extent_state *cached_state = NULL;
4999         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5000         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5001         u64 block_end = ALIGN(size, fs_info->sectorsize);
5002         u64 last_byte;
5003         u64 cur_offset;
5004         u64 hole_size;
5005         int err = 0;
5006 
5007         /*
5008          * If our size started in the middle of a block we need to zero out the
5009          * rest of the block before we expand the i_size, otherwise we could
5010          * expose stale data.
5011          */
5012         err = btrfs_truncate_block(inode, oldsize, 0, 0);
5013         if (err)
5014                 return err;
5015 
5016         if (size <= hole_start)
5017                 return 0;
5018 
5019         while (1) {
5020                 struct btrfs_ordered_extent *ordered;
5021 
5022                 lock_extent_bits(io_tree, hole_start, block_end - 1,
5023                                  &cached_state);
5024                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
5025                                                      block_end - hole_start);
5026                 if (!ordered)
5027                         break;
5028                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
5029                                      &cached_state);
5030                 btrfs_start_ordered_extent(inode, ordered, 1);
5031                 btrfs_put_ordered_extent(ordered);
5032         }
5033 
5034         cur_offset = hole_start;
5035         while (1) {
5036                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
5037                                 block_end - cur_offset, 0);
5038                 if (IS_ERR(em)) {
5039                         err = PTR_ERR(em);
5040                         em = NULL;
5041                         break;
5042                 }
5043                 last_byte = min(extent_map_end(em), block_end);
5044                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5045                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5046                         struct extent_map *hole_em;
5047                         hole_size = last_byte - cur_offset;
5048 
5049                         err = maybe_insert_hole(root, inode, cur_offset,
5050                                                 hole_size);
5051                         if (err)
5052                                 break;
5053                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
5054                                                 cur_offset + hole_size - 1, 0);
5055                         hole_em = alloc_extent_map();
5056                         if (!hole_em) {
5057                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5058                                         &BTRFS_I(inode)->runtime_flags);
5059                                 goto next;
5060                         }
5061                         hole_em->start = cur_offset;
5062                         hole_em->len = hole_size;
5063                         hole_em->orig_start = cur_offset;
5064 
5065                         hole_em->block_start = EXTENT_MAP_HOLE;
5066                         hole_em->block_len = 0;
5067                         hole_em->orig_block_len = 0;
5068                         hole_em->ram_bytes = hole_size;
5069                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
5070                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
5071                         hole_em->generation = fs_info->generation;
5072 
5073                         while (1) {
5074                                 write_lock(&em_tree->lock);
5075                                 err = add_extent_mapping(em_tree, hole_em, 1);
5076                                 write_unlock(&em_tree->lock);
5077                                 if (err != -EEXIST)
5078                                         break;
5079                                 btrfs_drop_extent_cache(BTRFS_I(inode),
5080                                                         cur_offset,
5081                                                         cur_offset +
5082                                                         hole_size - 1, 0);
5083                         }
5084                         free_extent_map(hole_em);
5085                 }
5086 next:
5087                 free_extent_map(em);
5088                 em = NULL;
5089                 cur_offset = last_byte;
5090                 if (cur_offset >= block_end)
5091                         break;
5092         }
5093         free_extent_map(em);
5094         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5095         return err;
5096 }
5097 
5098 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5099 {
5100         struct btrfs_root *root = BTRFS_I(inode)->root;
5101         struct btrfs_trans_handle *trans;
5102         loff_t oldsize = i_size_read(inode);
5103         loff_t newsize = attr->ia_size;
5104         int mask = attr->ia_valid;
5105         int ret;
5106 
5107         /*
5108          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5109          * special case where we need to update the times despite not having
5110          * these flags set.  For all other operations the VFS set these flags
5111          * explicitly if it wants a timestamp update.
5112          */
5113         if (newsize != oldsize) {
5114                 inode_inc_iversion(inode);
5115                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5116                         inode->i_ctime = inode->i_mtime =
5117                                 current_time(inode);
5118         }
5119 
5120         if (newsize > oldsize) {
5121                 /*
5122                  * Don't do an expanding truncate while snapshotting is ongoing.
5123                  * This is to ensure the snapshot captures a fully consistent
5124                  * state of this file - if the snapshot captures this expanding
5125                  * truncation, it must capture all writes that happened before
5126                  * this truncation.
5127                  */
5128                 btrfs_wait_for_snapshot_creation(root);
5129                 ret = btrfs_cont_expand(inode, oldsize, newsize);
5130                 if (ret) {
5131                         btrfs_end_write_no_snapshotting(root);
5132                         return ret;
5133                 }
5134 
5135                 trans = btrfs_start_transaction(root, 1);
5136                 if (IS_ERR(trans)) {
5137                         btrfs_end_write_no_snapshotting(root);
5138                         return PTR_ERR(trans);
5139                 }
5140 
5141                 i_size_write(inode, newsize);
5142                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5143                 pagecache_isize_extended(inode, oldsize, newsize);
5144                 ret = btrfs_update_inode(trans, root, inode);
5145                 btrfs_end_write_no_snapshotting(root);
5146                 btrfs_end_transaction(trans);
5147         } else {
5148 
5149                 /*
5150                  * We're truncating a file that used to have good data down to
5151                  * zero. Make sure it gets into the ordered flush list so that
5152                  * any new writes get down to disk quickly.
5153                  */
5154                 if (newsize == 0)
5155                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5156                                 &BTRFS_I(inode)->runtime_flags);
5157 
5158                 truncate_setsize(inode, newsize);
5159 
5160                 /* Disable nonlocked read DIO to avoid the end less truncate */
5161                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5162                 inode_dio_wait(inode);
5163                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5164 
5165                 ret = btrfs_truncate(inode, newsize == oldsize);
5166                 if (ret && inode->i_nlink) {
5167                         int err;
5168 
5169                         /*
5170                          * Truncate failed, so fix up the in-memory size. We
5171                          * adjusted disk_i_size down as we removed extents, so
5172                          * wait for disk_i_size to be stable and then update the
5173                          * in-memory size to match.
5174                          */
5175                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5176                         if (err)
5177                                 return err;
5178                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5179                 }
5180         }
5181 
5182         return ret;
5183 }
5184 
5185 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5186 {
5187         struct inode *inode = d_inode(dentry);
5188         struct btrfs_root *root = BTRFS_I(inode)->root;
5189         int err;
5190 
5191         if (btrfs_root_readonly(root))
5192                 return -EROFS;
5193 
5194         err = setattr_prepare(dentry, attr);
5195         if (err)
5196                 return err;
5197 
5198         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5199                 err = btrfs_setsize(inode, attr);
5200                 if (err)
5201                         return err;
5202         }
5203 
5204         if (attr->ia_valid) {
5205                 setattr_copy(inode, attr);
5206                 inode_inc_iversion(inode);
5207                 err = btrfs_dirty_inode(inode);
5208 
5209                 if (!err && attr->ia_valid & ATTR_MODE)
5210                         err = posix_acl_chmod(inode, inode->i_mode);
5211         }
5212 
5213         return err;
5214 }
5215 
5216 /*
5217  * While truncating the inode pages during eviction, we get the VFS calling
5218  * btrfs_invalidatepage() against each page of the inode. This is slow because
5219  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5220  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5221  * extent_state structures over and over, wasting lots of time.
5222  *
5223  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5224  * those expensive operations on a per page basis and do only the ordered io
5225  * finishing, while we release here the extent_map and extent_state structures,
5226  * without the excessive merging and splitting.
5227  */
5228 static void evict_inode_truncate_pages(struct inode *inode)
5229 {
5230         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5231         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5232         struct rb_node *node;
5233 
5234         ASSERT(inode->i_state & I_FREEING);
5235         truncate_inode_pages_final(&inode->i_data);
5236 
5237         write_lock(&map_tree->lock);
5238         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5239                 struct extent_map *em;
5240 
5241                 node = rb_first(&map_tree->map);
5242                 em = rb_entry(node, struct extent_map, rb_node);
5243                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5244                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5245                 remove_extent_mapping(map_tree, em);
5246                 free_extent_map(em);
5247                 if (need_resched()) {
5248                         write_unlock(&map_tree->lock);
5249                         cond_resched();
5250                         write_lock(&map_tree->lock);
5251                 }
5252         }
5253         write_unlock(&map_tree->lock);
5254 
5255         /*
5256          * Keep looping until we have no more ranges in the io tree.
5257          * We can have ongoing bios started by readpages (called from readahead)
5258          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5259          * still in progress (unlocked the pages in the bio but did not yet
5260          * unlocked the ranges in the io tree). Therefore this means some
5261          * ranges can still be locked and eviction started because before
5262          * submitting those bios, which are executed by a separate task (work
5263          * queue kthread), inode references (inode->i_count) were not taken
5264          * (which would be dropped in the end io callback of each bio).
5265          * Therefore here we effectively end up waiting for those bios and
5266          * anyone else holding locked ranges without having bumped the inode's
5267          * reference count - if we don't do it, when they access the inode's
5268          * io_tree to unlock a range it may be too late, leading to an
5269          * use-after-free issue.
5270          */
5271         spin_lock(&io_tree->lock);
5272         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5273                 struct extent_state *state;
5274                 struct extent_state *cached_state = NULL;
5275                 u64 start;
5276                 u64 end;
5277 
5278                 node = rb_first(&io_tree->state);
5279                 state = rb_entry(node, struct extent_state, rb_node);
5280                 start = state->start;
5281                 end = state->end;
5282                 spin_unlock(&io_tree->lock);
5283 
5284                 lock_extent_bits(io_tree, start, end, &cached_state);
5285 
5286                 /*
5287                  * If still has DELALLOC flag, the extent didn't reach disk,
5288                  * and its reserved space won't be freed by delayed_ref.
5289                  * So we need to free its reserved space here.
5290                  * (Refer to comment in btrfs_invalidatepage, case 2)
5291                  *
5292                  * Note, end is the bytenr of last byte, so we need + 1 here.
5293                  */
5294                 if (state->state & EXTENT_DELALLOC)
5295                         btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5296 
5297                 clear_extent_bit(io_tree, start, end,
5298                                  EXTENT_LOCKED | EXTENT_DIRTY |
5299                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5300                                  EXTENT_DEFRAG, 1, 1, &cached_state);
5301 
5302                 cond_resched();
5303                 spin_lock(&io_tree->lock);
5304         }
5305         spin_unlock(&io_tree->lock);
5306 }
5307 
5308 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5309                                                         struct btrfs_block_rsv *rsv,
5310                                                         u64 min_size)
5311 {
5312         struct btrfs_fs_info *fs_info = root->fs_info;
5313         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5314         int failures = 0;
5315 
5316         for (;;) {
5317                 struct btrfs_trans_handle *trans;
5318                 int ret;
5319 
5320                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5321                                              BTRFS_RESERVE_FLUSH_LIMIT);
5322 
5323                 if (ret && ++failures > 2) {
5324                         btrfs_warn(fs_info,
5325                                    "could not allocate space for a delete; will truncate on mount");
5326                         return ERR_PTR(-ENOSPC);
5327                 }
5328 
5329                 trans = btrfs_join_transaction(root);
5330                 if (IS_ERR(trans) || !ret)
5331                         return trans;
5332 
5333                 /*
5334                  * Try to steal from the global reserve if there is space for
5335                  * it.
5336                  */
5337                 if (!btrfs_check_space_for_delayed_refs(trans, fs_info) &&
5338                     !btrfs_block_rsv_migrate(global_rsv, rsv, min_size, 0))
5339                         return trans;
5340 
5341                 /* If not, commit and try again. */
5342                 ret = btrfs_commit_transaction(trans);
5343                 if (ret)
5344                         return ERR_PTR(ret);
5345         }
5346 }
5347 
5348 void btrfs_evict_inode(struct inode *inode)
5349 {
5350         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5351         struct btrfs_trans_handle *trans;
5352         struct btrfs_root *root = BTRFS_I(inode)->root;
5353         struct btrfs_block_rsv *rsv;
5354         u64 min_size;
5355         int ret;
5356 
5357         trace_btrfs_inode_evict(inode);
5358 
5359         if (!root) {
5360                 clear_inode(inode);
5361                 return;
5362         }
5363 
5364         min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5365 
5366         evict_inode_truncate_pages(inode);
5367 
5368         if (inode->i_nlink &&
5369             ((btrfs_root_refs(&root->root_item) != 0 &&
5370               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5371              btrfs_is_free_space_inode(BTRFS_I(inode))))
5372                 goto no_delete;
5373 
5374         if (is_bad_inode(inode))
5375                 goto no_delete;
5376         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5377         if (!special_file(inode->i_mode))
5378                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5379 
5380         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5381 
5382         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5383                 goto no_delete;
5384 
5385         if (inode->i_nlink > 0) {
5386                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5387                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5388                 goto no_delete;
5389         }
5390 
5391         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5392         if (ret)
5393                 goto no_delete;
5394 
5395         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5396         if (!rsv)
5397                 goto no_delete;
5398         rsv->size = min_size;
5399         rsv->failfast = 1;
5400 
5401         btrfs_i_size_write(BTRFS_I(inode), 0);
5402 
5403         while (1) {
5404                 trans = evict_refill_and_join(root, rsv, min_size);
5405                 if (IS_ERR(trans))
5406                         goto free_rsv;
5407 
5408                 trans->block_rsv = rsv;
5409 
5410                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5411                 trans->block_rsv = &fs_info->trans_block_rsv;
5412                 btrfs_end_transaction(trans);
5413                 btrfs_btree_balance_dirty(fs_info);
5414                 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5415                         goto free_rsv;
5416                 else if (!ret)
5417                         break;
5418         }
5419 
5420         /*
5421          * Errors here aren't a big deal, it just means we leave orphan items in
5422          * the tree. They will be cleaned up on the next mount. If the inode
5423          * number gets reused, cleanup deletes the orphan item without doing
5424          * anything, and unlink reuses the existing orphan item.
5425          *
5426          * If it turns out that we are dropping too many of these, we might want
5427          * to add a mechanism for retrying these after a commit.
5428          */
5429         trans = evict_refill_and_join(root, rsv, min_size);
5430         if (!IS_ERR(trans)) {
5431                 trans->block_rsv = rsv;
5432                 btrfs_orphan_del(trans, BTRFS_I(inode));
5433                 trans->block_rsv = &fs_info->trans_block_rsv;
5434                 btrfs_end_transaction(trans);
5435         }
5436 
5437         if (!(root == fs_info->tree_root ||
5438               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5439                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5440 
5441 free_rsv:
5442         btrfs_free_block_rsv(fs_info, rsv);
5443 no_delete:
5444         /*
5445          * If we didn't successfully delete, the orphan item will still be in
5446          * the tree and we'll retry on the next mount. Again, we might also want
5447          * to retry these periodically in the future.
5448          */
5449         btrfs_remove_delayed_node(BTRFS_I(inode));
5450         clear_inode(inode);
5451 }
5452 
5453 /*
5454  * this returns the key found in the dir entry in the location pointer.
5455  * If no dir entries were found, returns -ENOENT.
5456  * If found a corrupted location in dir entry, returns -EUCLEAN.
5457  */
5458 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5459                                struct btrfs_key *location)
5460 {
5461         const char *name = dentry->d_name.name;
5462         int namelen = dentry->d_name.len;
5463         struct btrfs_dir_item *di;
5464         struct btrfs_path *path;
5465         struct btrfs_root *root = BTRFS_I(dir)->root;
5466         int ret = 0;
5467 
5468         path = btrfs_alloc_path();
5469         if (!path)
5470                 return -ENOMEM;
5471 
5472         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5473                         name, namelen, 0);
5474         if (!di) {
5475                 ret = -ENOENT;
5476                 goto out;
5477         }
5478         if (IS_ERR(di)) {
5479                 ret = PTR_ERR(di);
5480                 goto out;
5481         }
5482 
5483         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5484         if (location->type != BTRFS_INODE_ITEM_KEY &&
5485             location->type != BTRFS_ROOT_ITEM_KEY) {
5486                 ret = -EUCLEAN;
5487                 btrfs_warn(root->fs_info,
5488 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5489                            __func__, name, btrfs_ino(BTRFS_I(dir)),
5490                            location->objectid, location->type, location->offset);
5491         }
5492 out:
5493         btrfs_free_path(path);
5494         return ret;
5495 }
5496 
5497 /*
5498  * when we hit a tree root in a directory, the btrfs part of the inode
5499  * needs to be changed to reflect the root directory of the tree root.  This
5500  * is kind of like crossing a mount point.
5501  */
5502 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5503                                     struct inode *dir,
5504                                     struct dentry *dentry,
5505                                     struct btrfs_key *location,
5506                                     struct btrfs_root **sub_root)
5507 {
5508         struct btrfs_path *path;
5509         struct btrfs_root *new_root;
5510         struct btrfs_root_ref *ref;
5511         struct extent_buffer *leaf;
5512         struct btrfs_key key;
5513         int ret;
5514         int err = 0;
5515 
5516         path = btrfs_alloc_path();
5517         if (!path) {
5518                 err = -ENOMEM;
5519                 goto out;
5520         }
5521 
5522         err = -ENOENT;
5523         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5524         key.type = BTRFS_ROOT_REF_KEY;
5525         key.offset = location->objectid;
5526 
5527         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5528         if (ret) {
5529                 if (ret < 0)
5530                         err = ret;
5531                 goto out;
5532         }
5533 
5534         leaf = path->nodes[0];
5535         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5536         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5537             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5538                 goto out;
5539 
5540         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5541                                    (unsigned long)(ref + 1),
5542                                    dentry->d_name.len);
5543         if (ret)
5544                 goto out;
5545 
5546         btrfs_release_path(path);
5547 
5548         new_root = btrfs_read_fs_root_no_name(fs_info, location);
5549         if (IS_ERR(new_root)) {
5550                 err = PTR_ERR(new_root);
5551                 goto out;
5552         }
5553 
5554         *sub_root = new_root;
5555         location->objectid = btrfs_root_dirid(&new_root->root_item);
5556         location->type = BTRFS_INODE_ITEM_KEY;
5557         location->offset = 0;
5558         err = 0;
5559 out:
5560         btrfs_free_path(path);
5561         return err;
5562 }
5563 
5564 static void inode_tree_add(struct inode *inode)
5565 {
5566         struct btrfs_root *root = BTRFS_I(inode)->root;
5567         struct btrfs_inode *entry;
5568         struct rb_node **p;
5569         struct rb_node *parent;
5570         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5571         u64 ino = btrfs_ino(BTRFS_I(inode));
5572 
5573         if (inode_unhashed(inode))
5574                 return;
5575         parent = NULL;
5576         spin_lock(&root->inode_lock);
5577         p = &root->inode_tree.rb_node;
5578         while (*p) {
5579                 parent = *p;
5580                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5581 
5582                 if (ino < btrfs_ino(entry))
5583                         p = &parent->rb_left;
5584                 else if (ino > btrfs_ino(entry))
5585                         p = &parent->rb_right;
5586                 else {
5587                         WARN_ON(!(entry->vfs_inode.i_state &
5588                                   (I_WILL_FREE | I_FREEING)));
5589                         rb_replace_node(parent, new, &root->inode_tree);
5590                         RB_CLEAR_NODE(parent);
5591                         spin_unlock(&root->inode_lock);
5592                         return;
5593                 }
5594         }
5595         rb_link_node(new, parent, p);
5596         rb_insert_color(new, &root->inode_tree);
5597         spin_unlock(&root->inode_lock);
5598 }
5599 
5600 static void inode_tree_del(struct inode *inode)
5601 {
5602         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5603         struct btrfs_root *root = BTRFS_I(inode)->root;
5604         int empty = 0;
5605 
5606         spin_lock(&root->inode_lock);
5607         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5608                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5609                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5610                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5611         }
5612         spin_unlock(&root->inode_lock);
5613 
5614         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5615                 synchronize_srcu(&fs_info->subvol_srcu);
5616                 spin_lock(&root->inode_lock);
5617                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5618                 spin_unlock(&root->inode_lock);
5619                 if (empty)
5620                         btrfs_add_dead_root(root);
5621         }
5622 }
5623 
5624 
5625 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5626 {
5627         struct btrfs_iget_args *args = p;
5628         inode->i_ino = args->location->objectid;
5629         memcpy(&BTRFS_I(inode)->location, args->location,
5630                sizeof(*args->location));
5631         BTRFS_I(inode)->root = args->root;
5632         return 0;
5633 }
5634 
5635 static int btrfs_find_actor(struct inode *inode, void *opaque)
5636 {
5637         struct btrfs_iget_args *args = opaque;
5638         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5639                 args->root == BTRFS_I(inode)->root;
5640 }
5641 
5642 static struct inode *btrfs_iget_locked(struct super_block *s,
5643                                        struct btrfs_key *location,
5644                                        struct btrfs_root *root)
5645 {
5646         struct inode *inode;
5647         struct btrfs_iget_args args;
5648         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5649 
5650         args.location = location;
5651         args.root = root;
5652 
5653         inode = iget5_locked(s, hashval, btrfs_find_actor,
5654                              btrfs_init_locked_inode,
5655                              (void *)&args);
5656         return inode;
5657 }
5658 
5659 /* Get an inode object given its location and corresponding root.
5660  * Returns in *is_new if the inode was read from disk
5661  */
5662 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5663                          struct btrfs_root *root, int *new)
5664 {
5665         struct inode *inode;
5666 
5667         inode = btrfs_iget_locked(s, location, root);
5668         if (!inode)
5669                 return ERR_PTR(-ENOMEM);
5670 
5671         if (inode->i_state & I_NEW) {
5672                 int ret;
5673 
5674                 ret = btrfs_read_locked_inode(inode);
5675                 if (!ret) {
5676                         inode_tree_add(inode);
5677                         unlock_new_inode(inode);
5678                         if (new)
5679                                 *new = 1;
5680                 } else {
5681                         iget_failed(inode);
5682                         /*
5683                          * ret > 0 can come from btrfs_search_slot called by
5684                          * btrfs_read_locked_inode, this means the inode item
5685                          * was not found.
5686                          */
5687                         if (ret > 0)
5688                                 ret = -ENOENT;
5689                         inode = ERR_PTR(ret);
5690                 }
5691         }
5692 
5693         return inode;
5694 }
5695 
5696 static struct inode *new_simple_dir(struct super_block *s,
5697                                     struct btrfs_key *key,
5698                                     struct btrfs_root *root)
5699 {
5700         struct inode *inode = new_inode(s);
5701 
5702         if (!inode)
5703                 return ERR_PTR(-ENOMEM);
5704 
5705         BTRFS_I(inode)->root = root;
5706         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5707         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5708 
5709         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5710         inode->i_op = &btrfs_dir_ro_inode_operations;
5711         inode->i_opflags &= ~IOP_XATTR;
5712         inode->i_fop = &simple_dir_operations;
5713         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5714         inode->i_mtime = current_time(inode);
5715         inode->i_atime = inode->i_mtime;
5716         inode->i_ctime = inode->i_mtime;
5717         BTRFS_I(inode)->i_otime = inode->i_mtime;
5718 
5719         return inode;
5720 }
5721 
5722 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5723 {
5724         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5725         struct inode *inode;
5726         struct btrfs_root *root = BTRFS_I(dir)->root;
5727         struct btrfs_root *sub_root = root;
5728         struct btrfs_key location;
5729         int index;
5730         int ret = 0;
5731 
5732         if (dentry->d_name.len > BTRFS_NAME_LEN)
5733                 return ERR_PTR(-ENAMETOOLONG);
5734 
5735         ret = btrfs_inode_by_name(dir, dentry, &location);
5736         if (ret < 0)
5737                 return ERR_PTR(ret);
5738 
5739         if (location.type == BTRFS_INODE_ITEM_KEY) {
5740                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5741                 return inode;
5742         }
5743 
5744         index = srcu_read_lock(&fs_info->subvol_srcu);
5745         ret = fixup_tree_root_location(fs_info, dir, dentry,
5746                                        &location, &sub_root);
5747         if (ret < 0) {
5748                 if (ret != -ENOENT)
5749                         inode = ERR_PTR(ret);
5750                 else
5751                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5752         } else {
5753                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5754         }
5755         srcu_read_unlock(&fs_info->subvol_srcu, index);
5756 
5757         if (!IS_ERR(inode) && root != sub_root) {
5758                 down_read(&fs_info->cleanup_work_sem);
5759                 if (!sb_rdonly(inode->i_sb))
5760                         ret = btrfs_orphan_cleanup(sub_root);
5761                 up_read(&fs_info->cleanup_work_sem);
5762                 if (ret) {
5763                         iput(inode);
5764                         inode = ERR_PTR(ret);
5765                 }
5766         }
5767 
5768         return inode;
5769 }
5770 
5771 static int btrfs_dentry_delete(const struct dentry *dentry)
5772 {
5773         struct btrfs_root *root;
5774         struct inode *inode = d_inode(dentry);
5775 
5776         if (!inode && !IS_ROOT(dentry))
5777                 inode = d_inode(dentry->d_parent);
5778 
5779         if (inode) {
5780                 root = BTRFS_I(inode)->root;
5781                 if (btrfs_root_refs(&root->root_item) == 0)
5782                         return 1;
5783 
5784                 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5785                         return 1;
5786         }
5787         return 0;
5788 }
5789 
5790 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5791                                    unsigned int flags)
5792 {
5793         struct inode *inode;
5794 
5795         inode = btrfs_lookup_dentry(dir, dentry);
5796         if (IS_ERR(inode)) {
5797                 if (PTR_ERR(inode) == -ENOENT)
5798                         inode = NULL;
5799                 else
5800                         return ERR_CAST(inode);
5801         }
5802 
5803         return d_splice_alias(inode, dentry);
5804 }
5805 
5806 unsigned char btrfs_filetype_table[] = {
5807         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5808 };
5809 
5810 /*
5811  * All this infrastructure exists because dir_emit can fault, and we are holding
5812  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5813  * our information into that, and then dir_emit from the buffer.  This is
5814  * similar to what NFS does, only we don't keep the buffer around in pagecache
5815  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5816  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5817  * tree lock.
5818  */
5819 static int btrfs_opendir(struct inode *inode, struct file *file)
5820 {
5821         struct btrfs_file_private *private;
5822 
5823         private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5824         if (!private)
5825                 return -ENOMEM;
5826         private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5827         if (!private->filldir_buf) {
5828                 kfree(private);
5829                 return -ENOMEM;
5830         }
5831         file->private_data = private;
5832         return 0;
5833 }
5834 
5835 struct dir_entry {
5836         u64 ino;
5837         u64 offset;
5838         unsigned type;
5839         int name_len;
5840 };
5841 
5842 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5843 {
5844         while (entries--) {
5845                 struct dir_entry *entry = addr;
5846                 char *name = (char *)(entry + 1);
5847 
5848                 ctx->pos = get_unaligned(&entry->offset);
5849                 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5850                                          get_unaligned(&entry->ino),
5851                                          get_unaligned(&entry->type)))
5852                         return 1;
5853                 addr += sizeof(struct dir_entry) +
5854                         get_unaligned(&entry->name_len);
5855                 ctx->pos++;
5856         }
5857         return 0;
5858 }
5859 
5860 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5861 {
5862         struct inode *inode = file_inode(file);
5863         struct btrfs_root *root = BTRFS_I(inode)->root;
5864         struct btrfs_file_private *private = file->private_data;
5865         struct btrfs_dir_item *di;
5866         struct btrfs_key key;
5867         struct btrfs_key found_key;
5868         struct btrfs_path *path;
5869         void *addr;
5870         struct list_head ins_list;
5871         struct list_head del_list;
5872         int ret;
5873         struct extent_buffer *leaf;
5874         int slot;
5875         char *name_ptr;
5876         int name_len;
5877         int entries = 0;
5878         int total_len = 0;
5879         bool put = false;
5880         struct btrfs_key location;
5881 
5882         if (!dir_emit_dots(file, ctx))
5883                 return 0;
5884 
5885         path = btrfs_alloc_path();
5886         if (!path)
5887                 return -ENOMEM;
5888 
5889         addr = private->filldir_buf;
5890         path->reada = READA_FORWARD;
5891 
5892         INIT_LIST_HEAD(&ins_list);
5893         INIT_LIST_HEAD(&del_list);
5894         put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5895 
5896 again:
5897         key.type = BTRFS_DIR_INDEX_KEY;
5898         key.offset = ctx->pos;
5899         key.objectid = btrfs_ino(BTRFS_I(inode));
5900 
5901         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5902         if (ret < 0)
5903                 goto err;
5904 
5905         while (1) {
5906                 struct dir_entry *entry;
5907 
5908                 leaf = path->nodes[0];
5909                 slot = path->slots[0];
5910                 if (slot >= btrfs_header_nritems(leaf)) {
5911                         ret = btrfs_next_leaf(root, path);
5912                         if (ret < 0)
5913                                 goto err;
5914                         else if (ret > 0)
5915                                 break;
5916                         continue;
5917                 }
5918 
5919                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5920 
5921                 if (found_key.objectid != key.objectid)
5922                         break;
5923                 if (found_key.type != BTRFS_DIR_INDEX_KEY)
5924                         break;
5925                 if (found_key.offset < ctx->pos)
5926                         goto next;
5927                 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5928                         goto next;
5929                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5930                 name_len = btrfs_dir_name_len(leaf, di);
5931                 if ((total_len + sizeof(struct dir_entry) + name_len) >=
5932                     PAGE_SIZE) {
5933                         btrfs_release_path(path);
5934                         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5935                         if (ret)
5936                                 goto nopos;
5937                         addr = private->filldir_buf;
5938                         entries = 0;
5939                         total_len = 0;
5940                         goto again;
5941                 }
5942 
5943                 entry = addr;
5944                 put_unaligned(name_len, &entry->name_len);
5945                 name_ptr = (char *)(entry + 1);
5946                 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
5947                                    name_len);
5948                 put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
5949                                 &entry->type);
5950                 btrfs_dir_item_key_to_cpu(leaf, di, &location);
5951                 put_unaligned(location.objectid, &entry->ino);
5952                 put_unaligned(found_key.offset, &entry->offset);
5953                 entries++;
5954                 addr += sizeof(struct dir_entry) + name_len;
5955                 total_len += sizeof(struct dir_entry) + name_len;
5956 next:
5957                 path->slots[0]++;
5958         }
5959         btrfs_release_path(path);
5960 
5961         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5962         if (ret)
5963                 goto nopos;
5964 
5965         ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5966         if (ret)
5967                 goto nopos;
5968 
5969         /*
5970          * Stop new entries from being returned after we return the last
5971          * entry.
5972          *
5973          * New directory entries are assigned a strictly increasing
5974          * offset.  This means that new entries created during readdir
5975          * are *guaranteed* to be seen in the future by that readdir.
5976          * This has broken buggy programs which operate on names as
5977          * they're returned by readdir.  Until we re-use freed offsets
5978          * we have this hack to stop new entries from being returned
5979          * under the assumption that they'll never reach this huge
5980          * offset.
5981          *
5982          * This is being careful not to overflow 32bit loff_t unless the
5983          * last entry requires it because doing so has broken 32bit apps
5984          * in the past.
5985          */
5986         if (ctx->pos >= INT_MAX)
5987                 ctx->pos = LLONG_MAX;
5988         else
5989                 ctx->pos = INT_MAX;
5990 nopos:
5991         ret = 0;
5992 err:
5993         if (put)
5994                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5995         btrfs_free_path(path);
5996         return ret;
5997 }
5998 
5999 /*
6000  * This is somewhat expensive, updating the tree every time the
6001  * inode changes.  But, it is most likely to find the inode in cache.
6002  * FIXME, needs more benchmarking...there are no reasons other than performance
6003  * to keep or drop this code.
6004  */
6005 static int btrfs_dirty_inode(struct inode *inode)
6006 {
6007         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6008         struct btrfs_root *root = BTRFS_I(inode)->root;
6009         struct btrfs_trans_handle *trans;
6010         int ret;
6011 
6012         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6013                 return 0;
6014 
6015         trans = btrfs_join_transaction(root);
6016         if (IS_ERR(trans))
6017                 return PTR_ERR(trans);
6018 
6019         ret = btrfs_update_inode(trans, root, inode);
6020         if (ret && ret == -ENOSPC) {
6021                 /* whoops, lets try again with the full transaction */
6022                 btrfs_end_transaction(trans);
6023                 trans = btrfs_start_transaction(root, 1);
6024                 if (IS_ERR(trans))
6025                         return PTR_ERR(trans);
6026 
6027                 ret = btrfs_update_inode(trans, root, inode);
6028         }
6029         btrfs_end_transaction(trans);
6030         if (BTRFS_I(inode)->delayed_node)
6031                 btrfs_balance_delayed_items(fs_info);
6032 
6033         return ret;
6034 }
6035 
6036 /*
6037  * This is a copy of file_update_time.  We need this so we can return error on
6038  * ENOSPC for updating the inode in the case of file write and mmap writes.
6039  */
6040 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6041                              int flags)
6042 {
6043         struct btrfs_root *root = BTRFS_I(inode)->root;
6044         bool dirty = flags & ~S_VERSION;
6045 
6046         if (btrfs_root_readonly(root))
6047                 return -EROFS;
6048 
6049         if (flags & S_VERSION)
6050                 dirty |= inode_maybe_inc_iversion(inode, dirty);
6051         if (flags & S_CTIME)
6052                 inode->i_ctime = *now;
6053         if (flags & S_MTIME)
6054                 inode->i_mtime = *now;
6055         if (flags & S_ATIME)
6056                 inode->i_atime = *now;
6057         return dirty ? btrfs_dirty_inode(inode) : 0;
6058 }
6059 
6060 /*
6061  * find the highest existing sequence number in a directory
6062  * and then set the in-memory index_cnt variable to reflect
6063  * free sequence numbers
6064  */
6065 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6066 {
6067         struct btrfs_root *root = inode->root;
6068         struct btrfs_key key, found_key;
6069         struct btrfs_path *path;
6070         struct extent_buffer *leaf;
6071         int ret;
6072 
6073         key.objectid = btrfs_ino(inode);
6074         key.type = BTRFS_DIR_INDEX_KEY;
6075         key.offset = (u64)-1;
6076 
6077         path = btrfs_alloc_path();
6078         if (!path)
6079                 return -ENOMEM;
6080 
6081         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6082         if (ret < 0)
6083                 goto out;
6084         /* FIXME: we should be able to handle this */
6085         if (ret == 0)
6086                 goto out;
6087         ret = 0;
6088 
6089         /*
6090          * MAGIC NUMBER EXPLANATION:
6091          * since we search a directory based on f_pos we have to start at 2
6092          * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
6093          * else has to start at 2
6094          */
6095         if (path->slots[0] == 0) {
6096                 inode->index_cnt = 2;
6097                 goto out;
6098         }
6099 
6100         path->slots[0]--;
6101 
6102         leaf = path->nodes[0];
6103         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6104 
6105         if (found_key.objectid != btrfs_ino(inode) ||
6106             found_key.type != BTRFS_DIR_INDEX_KEY) {
6107                 inode->index_cnt = 2;
6108                 goto out;
6109         }
6110 
6111         inode->index_cnt = found_key.offset + 1;
6112 out:
6113         btrfs_free_path(path);
6114         return ret;
6115 }
6116 
6117 /*
6118  * helper to find a free sequence number in a given directory.  This current
6119  * code is very simple, later versions will do smarter things in the btree
6120  */
6121 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6122 {
6123         int ret = 0;
6124 
6125         if (dir->index_cnt == (u64)-1) {
6126                 ret = btrfs_inode_delayed_dir_index_count(dir);
6127                 if (ret) {
6128                         ret = btrfs_set_inode_index_count(dir);
6129                         if (ret)
6130                                 return ret;
6131                 }
6132         }
6133 
6134         *index = dir->index_cnt;
6135         dir->index_cnt++;
6136 
6137         return ret;
6138 }
6139 
6140 static int btrfs_insert_inode_locked(struct inode *inode)
6141 {
6142         struct btrfs_iget_args args;
6143         args.location = &BTRFS_I(inode)->location;
6144         args.root = BTRFS_I(inode)->root;
6145 
6146         return insert_inode_locked4(inode,
6147                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6148                    btrfs_find_actor, &args);
6149 }
6150 
6151 /*
6152  * Inherit flags from the parent inode.
6153  *
6154  * Currently only the compression flags and the cow flags are inherited.
6155  */
6156 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6157 {
6158         unsigned int flags;
6159 
6160         if (!dir)
6161                 return;
6162 
6163         flags = BTRFS_I(dir)->flags;
6164 
6165         if (flags & BTRFS_INODE_NOCOMPRESS) {
6166                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6167                 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6168         } else if (flags & BTRFS_INODE_COMPRESS) {
6169                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6170                 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6171         }
6172 
6173         if (flags & BTRFS_INODE_NODATACOW) {
6174                 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6175                 if (S_ISREG(inode->i_mode))
6176                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6177         }
6178 
6179         btrfs_sync_inode_flags_to_i_flags(inode);
6180 }
6181 
6182 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6183                                      struct btrfs_root *root,
6184                                      struct inode *dir,
6185                                      const char *name, int name_len,
6186                                      u64 ref_objectid, u64 objectid,
6187                                      umode_t mode, u64 *index)
6188 {
6189         struct btrfs_fs_info *fs_info = root->fs_info;
6190         struct inode *inode;
6191         struct btrfs_inode_item *inode_item;
6192         struct btrfs_key *location;
6193         struct btrfs_path *path;
6194         struct btrfs_inode_ref *ref;
6195         struct btrfs_key key[2];
6196         u32 sizes[2];
6197         int nitems = name ? 2 : 1;
6198         unsigned long ptr;
6199         int ret;
6200 
6201         path = btrfs_alloc_path();
6202         if (!path)
6203                 return ERR_PTR(-ENOMEM);
6204 
6205         inode = new_inode(fs_info->sb);
6206         if (!inode) {
6207                 btrfs_free_path(path);
6208                 return ERR_PTR(-ENOMEM);
6209         }
6210 
6211         /*
6212          * O_TMPFILE, set link count to 0, so that after this point,
6213          * we fill in an inode item with the correct link count.
6214          */
6215         if (!name)
6216                 set_nlink(inode, 0);
6217 
6218         /*
6219          * we have to initialize this early, so we can reclaim the inode
6220          * number if we fail afterwards in this function.
6221          */
6222         inode->i_ino = objectid;
6223 
6224         if (dir && name) {
6225                 trace_btrfs_inode_request(dir);
6226 
6227                 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6228                 if (ret) {
6229                         btrfs_free_path(path);
6230                         iput(inode);
6231                         return ERR_PTR(ret);
6232                 }
6233         } else if (dir) {
6234                 *index = 0;
6235         }
6236         /*
6237          * index_cnt is ignored for everything but a dir,
6238          * btrfs_set_inode_index_count has an explanation for the magic
6239          * number
6240          */
6241         BTRFS_I(inode)->index_cnt = 2;
6242         BTRFS_I(inode)->dir_index = *index;
6243         BTRFS_I(inode)->root = root;
6244         BTRFS_I(inode)->generation = trans->transid;
6245         inode->i_generation = BTRFS_I(inode)->generation;
6246 
6247         /*
6248          * We could have gotten an inode number from somebody who was fsynced
6249          * and then removed in this same transaction, so let's just set full
6250          * sync since it will be a full sync anyway and this will blow away the
6251          * old info in the log.
6252          */
6253         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6254 
6255         key[0].objectid = objectid;
6256         key[0].type = BTRFS_INODE_ITEM_KEY;
6257         key[0].offset = 0;
6258 
6259         sizes[0] = sizeof(struct btrfs_inode_item);
6260 
6261         if (name) {
6262                 /*
6263                  * Start new inodes with an inode_ref. This is slightly more
6264                  * efficient for small numbers of hard links since they will
6265                  * be packed into one item. Extended refs will kick in if we
6266                  * add more hard links than can fit in the ref item.
6267                  */
6268                 key[1].objectid = objectid;
6269                 key[1].type = BTRFS_INODE_REF_KEY;
6270                 key[1].offset = ref_objectid;
6271 
6272                 sizes[1] = name_len + sizeof(*ref);
6273         }
6274 
6275         location = &BTRFS_I(inode)->location;
6276         location->objectid = objectid;
6277         location->offset = 0;
6278         location->type = BTRFS_INODE_ITEM_KEY;
6279 
6280         ret = btrfs_insert_inode_locked(inode);
6281         if (ret < 0) {
6282                 iput(inode);
6283                 goto fail;
6284         }
6285 
6286         path->leave_spinning = 1;
6287         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6288         if (ret != 0)
6289                 goto fail_unlock;
6290 
6291         inode_init_owner(inode, dir, mode);
6292         inode_set_bytes(inode, 0);
6293 
6294         inode->i_mtime = current_time(inode);
6295         inode->i_atime = inode->i_mtime;
6296         inode->i_ctime = inode->i_mtime;
6297         BTRFS_I(inode)->i_otime = inode->i_mtime;
6298 
6299         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6300                                   struct btrfs_inode_item);
6301         memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6302                              sizeof(*inode_item));
6303         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6304 
6305         if (name) {
6306                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6307                                      struct btrfs_inode_ref);
6308                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6309                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6310                 ptr = (unsigned long)(ref + 1);
6311                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6312         }
6313 
6314         btrfs_mark_buffer_dirty(path->nodes[0]);
6315         btrfs_free_path(path);
6316 
6317         btrfs_inherit_iflags(inode, dir);
6318 
6319         if (S_ISREG(mode)) {
6320                 if (btrfs_test_opt(fs_info, NODATASUM))
6321                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6322                 if (btrfs_test_opt(fs_info, NODATACOW))
6323                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6324                                 BTRFS_INODE_NODATASUM;
6325         }
6326 
6327         inode_tree_add(inode);
6328 
6329         trace_btrfs_inode_new(inode);
6330         btrfs_set_inode_last_trans(trans, inode);
6331 
6332         btrfs_update_root_times(trans, root);
6333 
6334         ret = btrfs_inode_inherit_props(trans, inode, dir);
6335         if (ret)
6336                 btrfs_err(fs_info,
6337                           "error inheriting props for ino %llu (root %llu): %d",
6338                         btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6339 
6340         return inode;
6341 
6342 fail_unlock:
6343         discard_new_inode(inode);
6344 fail:
6345         if (dir && name)
6346                 BTRFS_I(dir)->index_cnt--;
6347         btrfs_free_path(path);
6348         return ERR_PTR(ret);
6349 }
6350 
6351 static inline u8 btrfs_inode_type(struct inode *inode)
6352 {
6353         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6354 }
6355 
6356 /*
6357  * utility function to add 'inode' into 'parent_inode' with
6358  * a give name and a given sequence number.
6359  * if 'add_backref' is true, also insert a backref from the
6360  * inode to the parent directory.
6361  */
6362 int btrfs_add_link(struct btrfs_trans_handle *trans,
6363                    struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6364                    const char *name, int name_len, int add_backref, u64 index)
6365 {
6366         int ret = 0;
6367         struct btrfs_key key;
6368         struct btrfs_root *root = parent_inode->root;
6369         u64 ino = btrfs_ino(inode);
6370         u64 parent_ino = btrfs_ino(parent_inode);
6371 
6372         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6373                 memcpy(&key, &inode->root->root_key, sizeof(key));
6374         } else {
6375                 key.objectid = ino;
6376                 key.type = BTRFS_INODE_ITEM_KEY;
6377                 key.offset = 0;
6378         }
6379 
6380         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6381                 ret = btrfs_add_root_ref(trans, key.objectid,
6382                                          root->root_key.objectid, parent_ino,
6383                                          index, name, name_len);
6384         } else if (add_backref) {
6385                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6386                                              parent_ino, index);
6387         }
6388 
6389         /* Nothing to clean up yet */
6390         if (ret)
6391                 return ret;
6392 
6393         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6394                                     parent_inode, &key,
6395                                     btrfs_inode_type(&inode->vfs_inode), index);
6396         if (ret == -EEXIST || ret == -EOVERFLOW)
6397                 goto fail_dir_item;
6398         else if (ret) {
6399                 btrfs_abort_transaction(trans, ret);
6400                 return ret;
6401         }
6402 
6403         btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6404                            name_len * 2);
6405         inode_inc_iversion(&parent_inode->vfs_inode);
6406         parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
6407                 current_time(&parent_inode->vfs_inode);
6408         ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6409         if (ret)
6410                 btrfs_abort_transaction(trans, ret);
6411         return ret;
6412 
6413 fail_dir_item:
6414         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6415                 u64 local_index;
6416                 int err;
6417                 err = btrfs_del_root_ref(trans, key.objectid,
6418                                          root->root_key.objectid, parent_ino,
6419                                          &local_index, name, name_len);
6420 
6421         } else if (add_backref) {
6422                 u64 local_index;
6423                 int err;
6424 
6425                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6426                                           ino, parent_ino, &local_index);
6427         }
6428         return ret;
6429 }
6430 
6431 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6432                             struct btrfs_inode *dir, struct dentry *dentry,
6433                             struct btrfs_inode *inode, int backref, u64 index)
6434 {
6435         int err = btrfs_add_link(trans, dir, inode,
6436                                  dentry->d_name.name, dentry->d_name.len,
6437                                  backref, index);
6438         if (err > 0)
6439                 err = -EEXIST;
6440         return err;
6441 }
6442 
6443 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6444                         umode_t mode, dev_t rdev)
6445 {
6446         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6447         struct btrfs_trans_handle *trans;
6448         struct btrfs_root *root = BTRFS_I(dir)->root;
6449         struct inode *inode = NULL;
6450         int err;
6451         u64 objectid;
6452         u64 index = 0;
6453 
6454         /*
6455          * 2 for inode item and ref
6456          * 2 for dir items
6457          * 1 for xattr if selinux is on
6458          */
6459         trans = btrfs_start_transaction(root, 5);
6460         if (IS_ERR(trans))
6461                 return PTR_ERR(trans);
6462 
6463         err = btrfs_find_free_ino(root, &objectid);
6464         if (err)
6465                 goto out_unlock;
6466 
6467         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6468                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6469                         mode, &index);
6470         if (IS_ERR(inode)) {
6471                 err = PTR_ERR(inode);
6472                 inode = NULL;
6473                 goto out_unlock;
6474         }
6475 
6476         /*
6477         * If the active LSM wants to access the inode during
6478         * d_instantiate it needs these. Smack checks to see
6479         * if the filesystem supports xattrs by looking at the
6480         * ops vector.
6481         */
6482         inode->i_op = &btrfs_special_inode_operations;
6483         init_special_inode(inode, inode->i_mode, rdev);
6484 
6485         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6486         if (err)
6487                 goto out_unlock;
6488 
6489         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6490                         0, index);
6491         if (err)
6492                 goto out_unlock;
6493 
6494         btrfs_update_inode(trans, root, inode);
6495         d_instantiate_new(dentry, inode);
6496 
6497 out_unlock:
6498         btrfs_end_transaction(trans);
6499         btrfs_btree_balance_dirty(fs_info);
6500         if (err && inode) {
6501                 inode_dec_link_count(inode);
6502                 discard_new_inode(inode);
6503         }
6504         return err;
6505 }
6506 
6507 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6508                         umode_t mode, bool excl)
6509 {
6510         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6511         struct btrfs_trans_handle *trans;
6512         struct btrfs_root *root = BTRFS_I(dir)->root;
6513         struct inode *inode = NULL;
6514         int err;
6515         u64 objectid;
6516         u64 index = 0;
6517 
6518         /*
6519          * 2 for inode item and ref
6520          * 2 for dir items
6521          * 1 for xattr if selinux is on
6522          */
6523         trans = btrfs_start_transaction(root, 5);
6524         if (IS_ERR(trans))
6525                 return PTR_ERR(trans);
6526 
6527         err = btrfs_find_free_ino(root, &objectid);
6528         if (err)
6529                 goto out_unlock;
6530 
6531         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6532                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6533                         mode, &index);
6534         if (IS_ERR(inode)) {
6535                 err = PTR_ERR(inode);
6536                 inode = NULL;
6537                 goto out_unlock;
6538         }
6539         /*
6540         * If the active LSM wants to access the inode during
6541         * d_instantiate it needs these. Smack checks to see
6542         * if the filesystem supports xattrs by looking at the
6543         * ops vector.
6544         */
6545         inode->i_fop = &btrfs_file_operations;
6546         inode->i_op = &btrfs_file_inode_operations;
6547         inode->i_mapping->a_ops = &btrfs_aops;
6548 
6549         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6550         if (err)
6551                 goto out_unlock;
6552 
6553         err = btrfs_update_inode(trans, root, inode);
6554         if (err)
6555                 goto out_unlock;
6556 
6557         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6558                         0, index);
6559         if (err)
6560                 goto out_unlock;
6561 
6562         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6563         d_instantiate_new(dentry, inode);
6564 
6565 out_unlock:
6566         btrfs_end_transaction(trans);
6567         if (err && inode) {
6568                 inode_dec_link_count(inode);
6569                 discard_new_inode(inode);
6570         }
6571         btrfs_btree_balance_dirty(fs_info);
6572         return err;
6573 }
6574 
6575 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6576                       struct dentry *dentry)
6577 {
6578         struct btrfs_trans_handle *trans = NULL;
6579         struct btrfs_root *root = BTRFS_I(dir)->root;
6580         struct inode *inode = d_inode(old_dentry);
6581         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6582         u64 index;
6583         int err;
6584         int drop_inode = 0;
6585 
6586         /* do not allow sys_link's with other subvols of the same device */
6587         if (root->objectid != BTRFS_I(inode)->root->objectid)
6588                 return -EXDEV;
6589 
6590         if (inode->i_nlink >= BTRFS_LINK_MAX)
6591                 return -EMLINK;
6592 
6593         err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6594         if (err)
6595                 goto fail;
6596 
6597         /*
6598          * 2 items for inode and inode ref
6599          * 2 items for dir items
6600          * 1 item for parent inode
6601          * 1 item for orphan item deletion if O_TMPFILE
6602          */
6603         trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6604         if (IS_ERR(trans)) {
6605                 err = PTR_ERR(trans);
6606                 trans = NULL;
6607                 goto fail;
6608         }
6609 
6610         /* There are several dir indexes for this inode, clear the cache. */
6611         BTRFS_I(inode)->dir_index = 0ULL;
6612         inc_nlink(inode);
6613         inode_inc_iversion(inode);
6614         inode->i_ctime = current_time(inode);
6615         ihold(inode);
6616         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6617 
6618         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6619                         1, index);
6620 
6621         if (err) {
6622                 drop_inode = 1;
6623         } else {
6624                 struct dentry *parent = dentry->d_parent;
6625                 int ret;
6626 
6627                 err = btrfs_update_inode(trans, root, inode);
6628                 if (err)
6629                         goto fail;
6630                 if (inode->i_nlink == 1) {
6631                         /*
6632                          * If new hard link count is 1, it's a file created
6633                          * with open(2) O_TMPFILE flag.
6634                          */
6635                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
6636                         if (err)
6637                                 goto fail;
6638                 }
6639                 d_instantiate(dentry, inode);
6640                 ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
6641                                          true, NULL);
6642                 if (ret == BTRFS_NEED_TRANS_COMMIT) {
6643                         err = btrfs_commit_transaction(trans);
6644                         trans = NULL;
6645                 }
6646         }
6647 
6648 fail:
6649         if (trans)
6650                 btrfs_end_transaction(trans);
6651         if (drop_inode) {
6652                 inode_dec_link_count(inode);
6653                 iput(inode);
6654         }
6655         btrfs_btree_balance_dirty(fs_info);
6656         return err;
6657 }
6658 
6659 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6660 {
6661         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6662         struct inode *inode = NULL;
6663         struct btrfs_trans_handle *trans;
6664         struct btrfs_root *root = BTRFS_I(dir)->root;
6665         int err = 0;
6666         int drop_on_err = 0;
6667         u64 objectid = 0;
6668         u64 index = 0;
6669 
6670         /*
6671          * 2 items for inode and ref
6672          * 2 items for dir items
6673          * 1 for xattr if selinux is on
6674          */
6675         trans = btrfs_start_transaction(root, 5);
6676         if (IS_ERR(trans))
6677                 return PTR_ERR(trans);
6678 
6679         err = btrfs_find_free_ino(root, &objectid);
6680         if (err)
6681                 goto out_fail;
6682 
6683         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6684                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6685                         S_IFDIR | mode, &index);
6686         if (IS_ERR(inode)) {
6687                 err = PTR_ERR(inode);
6688                 inode = NULL;
6689                 goto out_fail;
6690         }
6691 
6692         drop_on_err = 1;
6693         /* these must be set before we unlock the inode */
6694         inode->i_op = &btrfs_dir_inode_operations;
6695         inode->i_fop = &btrfs_dir_file_operations;
6696 
6697         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6698         if (err)
6699                 goto out_fail;
6700 
6701         btrfs_i_size_write(BTRFS_I(inode), 0);
6702         err = btrfs_update_inode(trans, root, inode);
6703         if (err)
6704                 goto out_fail;
6705 
6706         err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6707                         dentry->d_name.name,
6708                         dentry->d_name.len, 0, index);
6709         if (err)
6710                 goto out_fail;
6711 
6712         d_instantiate_new(dentry, inode);
6713         drop_on_err = 0;
6714 
6715 out_fail:
6716         btrfs_end_transaction(trans);
6717         if (err && inode) {
6718                 inode_dec_link_count(inode);
6719                 discard_new_inode(inode);
6720         }
6721         btrfs_btree_balance_dirty(fs_info);
6722         return err;
6723 }
6724 
6725 static noinline int uncompress_inline(struct btrfs_path *path,
6726                                       struct page *page,
6727                                       size_t pg_offset, u64 extent_offset,
6728                                       struct btrfs_file_extent_item *item)
6729 {
6730         int ret;
6731         struct extent_buffer *leaf = path->nodes[0];
6732         char *tmp;
6733         size_t max_size;
6734         unsigned long inline_size;
6735         unsigned long ptr;
6736         int compress_type;
6737 
6738         WARN_ON(pg_offset != 0);
6739         compress_type = btrfs_file_extent_compression(leaf, item);
6740         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6741         inline_size = btrfs_file_extent_inline_item_len(leaf,
6742                                         btrfs_item_nr(path->slots[0]));
6743         tmp = kmalloc(inline_size, GFP_NOFS);
6744         if (!tmp)
6745                 return -ENOMEM;
6746         ptr = btrfs_file_extent_inline_start(item);
6747 
6748         read_extent_buffer(leaf, tmp, ptr, inline_size);
6749 
6750         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6751         ret = btrfs_decompress(compress_type, tmp, page,
6752                                extent_offset, inline_size, max_size);
6753 
6754         /*
6755          * decompression code contains a memset to fill in any space between the end
6756          * of the uncompressed data and the end of max_size in case the decompressed
6757          * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6758          * the end of an inline extent and the beginning of the next block, so we
6759          * cover that region here.
6760          */
6761 
6762         if (max_size + pg_offset < PAGE_SIZE) {
6763                 char *map = kmap(page);
6764                 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
6765                 kunmap(page);
6766         }
6767         kfree(tmp);
6768         return ret;
6769 }
6770 
6771 /*
6772  * a bit scary, this does extent mapping from logical file offset to the disk.
6773  * the ugly parts come from merging extents from the disk with the in-ram
6774  * representation.  This gets more complex because of the data=ordered code,
6775  * where the in-ram extents might be locked pending data=ordered completion.
6776  *
6777  * This also copies inline extents directly into the page.
6778  */
6779 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6780                 struct page *page,
6781             size_t pg_offset, u64 start, u64 len,
6782                 int create)
6783 {
6784         struct btrfs_fs_info *fs_info = inode->root->fs_info;
6785         int ret;
6786         int err = 0;
6787         u64 extent_start = 0;
6788         u64 extent_end = 0;
6789         u64 objectid = btrfs_ino(inode);
6790         u32 found_type;
6791         struct btrfs_path *path = NULL;
6792         struct btrfs_root *root = inode->root;
6793         struct btrfs_file_extent_item *item;
6794         struct extent_buffer *leaf;
6795         struct btrfs_key found_key;
6796         struct extent_map *em = NULL;
6797         struct extent_map_tree *em_tree = &inode->extent_tree;
6798         struct extent_io_tree *io_tree = &inode->io_tree;
6799         const bool new_inline = !page || create;
6800 
6801         read_lock(&em_tree->lock);
6802         em = lookup_extent_mapping(em_tree, start, len);
6803         if (em)
6804                 em->bdev = fs_info->fs_devices->latest_bdev;
6805         read_unlock(&em_tree->lock);
6806 
6807         if (em) {
6808                 if (em->start > start || em->start + em->len <= start)
6809                         free_extent_map(em);
6810                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6811                         free_extent_map(em);
6812                 else
6813                         goto out;
6814         }
6815         em = alloc_extent_map();
6816         if (!em) {
6817                 err = -ENOMEM;
6818                 goto out;
6819         }
6820         em->bdev = fs_info->fs_devices->latest_bdev;
6821         em->start = EXTENT_MAP_HOLE;
6822         em->orig_start = EXTENT_MAP_HOLE;
6823         em->len = (u64)-1;
6824         em->block_len = (u64)-1;
6825 
6826         if (!path) {
6827                 path = btrfs_alloc_path();
6828                 if (!path) {
6829                         err = -ENOMEM;
6830                         goto out;
6831                 }
6832                 /*
6833                  * Chances are we'll be called again, so go ahead and do
6834                  * readahead
6835                  */
6836                 path->reada = READA_FORWARD;
6837         }
6838 
6839         ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6840         if (ret < 0) {
6841                 err = ret;
6842                 goto out;
6843         }
6844 
6845         if (ret != 0) {
6846                 if (path->slots[0] == 0)
6847                         goto not_found;
6848                 path->slots[0]--;
6849         }
6850 
6851         leaf = path->nodes[0];
6852         item = btrfs_item_ptr(leaf, path->slots[0],
6853                               struct btrfs_file_extent_item);
6854         /* are we inside the extent that was found? */
6855         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6856         found_type = found_key.type;
6857         if (found_key.objectid != objectid ||
6858             found_type != BTRFS_EXTENT_DATA_KEY) {
6859                 /*
6860                  * If we backup past the first extent we want to move forward
6861                  * and see if there is an extent in front of us, otherwise we'll
6862                  * say there is a hole for our whole search range which can
6863                  * cause problems.
6864                  */
6865                 extent_end = start;
6866                 goto next;
6867         }
6868 
6869         found_type = btrfs_file_extent_type(leaf, item);
6870         extent_start = found_key.offset;
6871         if (found_type == BTRFS_FILE_EXTENT_REG ||
6872             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6873                 extent_end = extent_start +
6874                        btrfs_file_extent_num_bytes(leaf, item);
6875 
6876                 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6877                                                        extent_start);
6878         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6879                 size_t size;
6880 
6881                 size = btrfs_file_extent_ram_bytes(leaf, item);
6882                 extent_end = ALIGN(extent_start + size,
6883                                    fs_info->sectorsize);
6884 
6885                 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6886                                                       path->slots[0],
6887                                                       extent_start);
6888         }
6889 next:
6890         if (start >= extent_end) {
6891                 path->slots[0]++;
6892                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6893                         ret = btrfs_next_leaf(root, path);
6894                         if (ret < 0) {
6895                                 err = ret;
6896                                 goto out;
6897                         }
6898                         if (ret > 0)
6899                                 goto not_found;
6900                         leaf = path->nodes[0];
6901                 }
6902                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6903                 if (found_key.objectid != objectid ||
6904                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6905                         goto not_found;
6906                 if (start + len <= found_key.offset)
6907                         goto not_found;
6908                 if (start > found_key.offset)
6909                         goto next;
6910                 em->start = start;
6911                 em->orig_start = start;
6912                 em->len = found_key.offset - start;
6913                 goto not_found_em;
6914         }
6915 
6916         btrfs_extent_item_to_extent_map(inode, path, item,
6917                         new_inline, em);
6918 
6919         if (found_type == BTRFS_FILE_EXTENT_REG ||
6920             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6921                 goto insert;
6922         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6923                 unsigned long ptr;
6924                 char *map;
6925                 size_t size;
6926                 size_t extent_offset;
6927                 size_t copy_size;
6928 
6929                 if (new_inline)
6930                         goto out;
6931 
6932                 size = btrfs_file_extent_ram_bytes(leaf, item);
6933                 extent_offset = page_offset(page) + pg_offset - extent_start;
6934                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6935                                   size - extent_offset);
6936                 em->start = extent_start + extent_offset;
6937                 em->len = ALIGN(copy_size, fs_info->sectorsize);
6938                 em->orig_block_len = em->len;
6939                 em->orig_start = em->start;
6940                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6941                 if (!PageUptodate(page)) {
6942                         if (btrfs_file_extent_compression(leaf, item) !=
6943                             BTRFS_COMPRESS_NONE) {
6944                                 ret = uncompress_inline(path, page, pg_offset,
6945                                                         extent_offset, item);
6946                                 if (ret) {
6947                                         err = ret;
6948                                         goto out;
6949                                 }
6950                         } else {
6951                                 map = kmap(page);
6952                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6953                                                    copy_size);
6954                                 if (pg_offset + copy_size < PAGE_SIZE) {
6955                                         memset(map + pg_offset + copy_size, 0,
6956                                                PAGE_SIZE - pg_offset -
6957                                                copy_size);
6958                                 }
6959                                 kunmap(page);
6960                         }
6961                         flush_dcache_page(page);
6962                 }
6963                 set_extent_uptodate(io_tree, em->start,
6964                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6965                 goto insert;
6966         }
6967 not_found:
6968         em->start = start;
6969         em->orig_start = start;
6970         em->len = len;
6971 not_found_em:
6972         em->block_start = EXTENT_MAP_HOLE;
6973 insert:
6974         btrfs_release_path(path);
6975         if (em->start > start || extent_map_end(em) <= start) {
6976                 btrfs_err(fs_info,
6977                           "bad extent! em: [%llu %llu] passed [%llu %llu]",
6978                           em->start, em->len, start, len);
6979                 err = -EIO;
6980                 goto out;
6981         }
6982 
6983         err = 0;
6984         write_lock(&em_tree->lock);
6985         err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6986         write_unlock(&em_tree->lock);
6987 out:
6988 
6989         trace_btrfs_get_extent(root, inode, em);
6990 
6991         btrfs_free_path(path);
6992         if (err) {
6993                 free_extent_map(em);
6994                 return ERR_PTR(err);
6995         }
6996         BUG_ON(!em); /* Error is always set */
6997         return em;
6998 }
6999 
7000 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7001                 struct page *page,
7002                 size_t pg_offset, u64 start, u64 len,
7003                 int create)
7004 {
7005         struct extent_map *em;
7006         struct extent_map *hole_em = NULL;
7007         u64 range_start = start;
7008         u64 end;
7009         u64 found;
7010         u64 found_end;
7011         int err = 0;
7012 
7013         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7014         if (IS_ERR(em))
7015                 return em;
7016         /*
7017          * If our em maps to:
7018          * - a hole or
7019          * - a pre-alloc extent,
7020          * there might actually be delalloc bytes behind it.
7021          */
7022         if (em->block_start != EXTENT_MAP_HOLE &&
7023             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7024                 return em;
7025         else
7026                 hole_em = em;
7027 
7028         /* check to see if we've wrapped (len == -1 or similar) */
7029         end = start + len;
7030         if (end < start)
7031                 end = (u64)-1;
7032         else
7033                 end -= 1;
7034 
7035         em = NULL;
7036 
7037         /* ok, we didn't find anything, lets look for delalloc */
7038         found = count_range_bits(&inode->io_tree, &range_start,
7039                                  end, len, EXTENT_DELALLOC, 1);
7040         found_end = range_start + found;
7041         if (found_end < range_start)
7042                 found_end = (u64)-1;
7043 
7044         /*
7045          * we didn't find anything useful, return
7046          * the original results from get_extent()
7047          */
7048         if (range_start > end || found_end <= start) {
7049                 em = hole_em;
7050                 hole_em = NULL;
7051                 goto out;
7052         }
7053 
7054         /* adjust the range_start to make sure it doesn't
7055          * go backwards from the start they passed in
7056          */
7057         range_start = max(start, range_start);
7058         found = found_end - range_start;
7059 
7060         if (found > 0) {
7061                 u64 hole_start = start;
7062                 u64 hole_len = len;
7063 
7064                 em = alloc_extent_map();
7065                 if (!em) {
7066                         err = -ENOMEM;
7067                         goto out;
7068                 }
7069                 /*
7070                  * when btrfs_get_extent can't find anything it
7071                  * returns one huge hole
7072                  *
7073                  * make sure what it found really fits our range, and
7074                  * adjust to make sure it is based on the start from
7075                  * the caller
7076                  */
7077                 if (hole_em) {
7078                         u64 calc_end = extent_map_end(hole_em);
7079 
7080                         if (calc_end <= start || (hole_em->start > end)) {
7081                                 free_extent_map(hole_em);
7082                                 hole_em = NULL;
7083                         } else {
7084                                 hole_start = max(hole_em->start, start);
7085                                 hole_len = calc_end - hole_start;
7086                         }
7087                 }
7088                 em->bdev = NULL;
7089                 if (hole_em && range_start > hole_start) {
7090                         /* our hole starts before our delalloc, so we
7091                          * have to return just the parts of the hole
7092                          * that go until  the delalloc starts
7093                          */
7094                         em->len = min(hole_len,
7095                                       range_start - hole_start);
7096                         em->start = hole_start;
7097                         em->orig_start = hole_start;
7098                         /*
7099                          * don't adjust block start at all,
7100                          * it is fixed at EXTENT_MAP_HOLE
7101                          */
7102                         em->block_start = hole_em->block_start;
7103                         em->block_len = hole_len;
7104                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7105                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7106                 } else {
7107                         em->start = range_start;
7108                         em->len = found;
7109                         em->orig_start = range_start;
7110                         em->block_start = EXTENT_MAP_DELALLOC;
7111                         em->block_len = found;
7112                 }
7113         } else {
7114                 return hole_em;
7115         }
7116 out:
7117 
7118         free_extent_map(hole_em);
7119         if (err) {
7120                 free_extent_map(em);
7121                 return ERR_PTR(err);
7122         }
7123         return em;
7124 }
7125 
7126 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7127                                                   const u64 start,
7128                                                   const u64 len,
7129                                                   const u64 orig_start,
7130                                                   const u64 block_start,
7131                                                   const u64 block_len,
7132                                                   const u64 orig_block_len,
7133                                                   const u64 ram_bytes,
7134                                                   const int type)
7135 {
7136         struct extent_map *em = NULL;
7137         int ret;
7138 
7139         if (type != BTRFS_ORDERED_NOCOW) {
7140                 em = create_io_em(inode, start, len, orig_start,
7141                                   block_start, block_len, orig_block_len,
7142                                   ram_bytes,
7143                                   BTRFS_COMPRESS_NONE, /* compress_type */
7144                                   type);
7145                 if (IS_ERR(em))
7146                         goto out;
7147         }
7148         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7149                                            len, block_len, type);
7150         if (ret) {
7151                 if (em) {
7152                         free_extent_map(em);
7153                         btrfs_drop_extent_cache(BTRFS_I(inode), start,
7154                                                 start + len - 1, 0);
7155                 }
7156                 em = ERR_PTR(ret);
7157         }
7158  out:
7159 
7160         return em;
7161 }
7162 
7163 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7164                                                   u64 start, u64 len)
7165 {
7166         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7167         struct btrfs_root *root = BTRFS_I(inode)->root;
7168         struct extent_map *em;
7169         struct btrfs_key ins;
7170         u64 alloc_hint;
7171         int ret;
7172 
7173         alloc_hint = get_extent_allocation_hint(inode, start, len);
7174         ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7175                                    0, alloc_hint, &ins, 1, 1);
7176         if (ret)
7177                 return ERR_PTR(ret);
7178 
7179         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7180                                      ins.objectid, ins.offset, ins.offset,
7181                                      ins.offset, BTRFS_ORDERED_REGULAR);
7182         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7183         if (IS_ERR(em))
7184                 btrfs_free_reserved_extent(fs_info, ins.objectid,
7185                                            ins.offset, 1);
7186 
7187         return em;
7188 }
7189 
7190 /*
7191  * returns 1 when the nocow is safe, < 1 on error, 0 if the
7192  * block must be cow'd
7193  */
7194 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7195                               u64 *orig_start, u64 *orig_block_len,
7196                               u64 *ram_bytes)
7197 {
7198         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7199         struct btrfs_path *path;
7200         int ret;
7201         struct extent_buffer *leaf;
7202         struct btrfs_root *root = BTRFS_I(inode)->root;
7203         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7204         struct btrfs_file_extent_item *fi;
7205         struct btrfs_key key;
7206         u64 disk_bytenr;
7207         u64 backref_offset;
7208         u64 extent_end;
7209         u64 num_bytes;
7210         int slot;
7211         int found_type;
7212         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7213 
7214         path = btrfs_alloc_path();
7215         if (!path)
7216                 return -ENOMEM;
7217 
7218         ret = btrfs_lookup_file_extent(NULL, root, path,
7219                         btrfs_ino(BTRFS_I(inode)), offset, 0);
7220         if (ret < 0)
7221                 goto out;
7222 
7223         slot = path->slots[0];
7224         if (ret == 1) {
7225                 if (slot == 0) {
7226                         /* can't find the item, must cow */
7227                         ret = 0;
7228                         goto out;
7229                 }
7230                 slot--;
7231         }
7232         ret = 0;
7233         leaf = path->nodes[0];
7234         btrfs_item_key_to_cpu(leaf, &key, slot);
7235         if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7236             key.type != BTRFS_EXTENT_DATA_KEY) {
7237                 /* not our file or wrong item type, must cow */
7238                 goto out;
7239         }
7240 
7241         if (key.offset > offset) {
7242                 /* Wrong offset, must cow */
7243                 goto out;
7244         }
7245 
7246         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7247         found_type = btrfs_file_extent_type(leaf, fi);
7248         if (found_type != BTRFS_FILE_EXTENT_REG &&
7249             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7250                 /* not a regular extent, must cow */
7251                 goto out;
7252         }
7253 
7254         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7255                 goto out;
7256 
7257         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7258         if (extent_end <= offset)
7259                 goto out;
7260 
7261         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7262         if (disk_bytenr == 0)
7263                 goto out;
7264 
7265         if (btrfs_file_extent_compression(leaf, fi) ||
7266             btrfs_file_extent_encryption(leaf, fi) ||
7267             btrfs_file_extent_other_encoding(leaf, fi))
7268                 goto out;
7269 
7270         /*
7271          * Do the same check as in btrfs_cross_ref_exist but without the
7272          * unnecessary search.
7273          */
7274         if (btrfs_file_extent_generation(leaf, fi) <=
7275             btrfs_root_last_snapshot(&root->root_item))
7276                 goto out;
7277 
7278         backref_offset = btrfs_file_extent_offset(leaf, fi);
7279 
7280         if (orig_start) {
7281                 *orig_start = key.offset - backref_offset;
7282                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7283                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7284         }
7285 
7286         if (btrfs_extent_readonly(fs_info, disk_bytenr))
7287                 goto out;
7288 
7289         num_bytes = min(offset + *len, extent_end) - offset;
7290         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7291                 u64 range_end;
7292 
7293                 range_end = round_up(offset + num_bytes,
7294                                      root->fs_info->sectorsize) - 1;
7295                 ret = test_range_bit(io_tree, offset, range_end,
7296                                      EXTENT_DELALLOC, 0, NULL);
7297                 if (ret) {
7298                         ret = -EAGAIN;
7299                         goto out;
7300                 }
7301         }
7302 
7303         btrfs_release_path(path);
7304 
7305         /*
7306          * look for other files referencing this extent, if we
7307          * find any we must cow
7308          */
7309 
7310         ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7311                                     key.offset - backref_offset, disk_bytenr);
7312         if (ret) {
7313                 ret = 0;
7314                 goto out;
7315         }
7316 
7317         /*
7318          * adjust disk_bytenr and num_bytes to cover just the bytes
7319          * in this extent we are about to write.  If there
7320          * are any csums in that range we have to cow in order
7321          * to keep the csums correct
7322          */
7323         disk_bytenr += backref_offset;
7324         disk_bytenr += offset - key.offset;
7325         if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7326                 goto out;
7327         /*
7328          * all of the above have passed, it is safe to overwrite this extent
7329          * without cow
7330          */
7331         *len = num_bytes;
7332         ret = 1;
7333 out:
7334         btrfs_free_path(path);
7335         return ret;
7336 }
7337 
7338 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7339                               struct extent_state **cached_state, int writing)
7340 {
7341         struct btrfs_ordered_extent *ordered;
7342         int ret = 0;
7343 
7344         while (1) {
7345                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7346                                  cached_state);
7347                 /*
7348                  * We're concerned with the entire range that we're going to be
7349                  * doing DIO to, so we need to make sure there's no ordered
7350                  * extents in this range.
7351                  */
7352                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7353                                                      lockend - lockstart + 1);
7354 
7355                 /*
7356                  * We need to make sure there are no buffered pages in this
7357                  * range either, we could have raced between the invalidate in
7358                  * generic_file_direct_write and locking the extent.  The
7359                  * invalidate needs to happen so that reads after a write do not
7360                  * get stale data.
7361                  */
7362                 if (!ordered &&
7363                     (!writing || !filemap_range_has_page(inode->i_mapping,
7364                                                          lockstart, lockend)))
7365                         break;
7366 
7367                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7368                                      cached_state);
7369 
7370                 if (ordered) {
7371                         /*
7372                          * If we are doing a DIO read and the ordered extent we
7373                          * found is for a buffered write, we can not wait for it
7374                          * to complete and retry, because if we do so we can
7375                          * deadlock with concurrent buffered writes on page
7376                          * locks. This happens only if our DIO read covers more
7377                          * than one extent map, if at this point has already
7378                          * created an ordered extent for a previous extent map
7379                          * and locked its range in the inode's io tree, and a
7380                          * concurrent write against that previous extent map's
7381                          * range and this range started (we unlock the ranges
7382                          * in the io tree only when the bios complete and
7383                          * buffered writes always lock pages before attempting
7384                          * to lock range in the io tree).
7385                          */
7386                         if (writing ||
7387                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7388                                 btrfs_start_ordered_extent(inode, ordered, 1);
7389                         else
7390                                 ret = -ENOTBLK;
7391                         btrfs_put_ordered_extent(ordered);
7392                 } else {
7393                         /*
7394                          * We could trigger writeback for this range (and wait
7395                          * for it to complete) and then invalidate the pages for
7396                          * this range (through invalidate_inode_pages2_range()),
7397                          * but that can lead us to a deadlock with a concurrent
7398                          * call to readpages() (a buffered read or a defrag call
7399                          * triggered a readahead) on a page lock due to an
7400                          * ordered dio extent we created before but did not have
7401                          * yet a corresponding bio submitted (whence it can not
7402                          * complete), which makes readpages() wait for that
7403                          * ordered extent to complete while holding a lock on
7404                          * that page.
7405                          */
7406                         ret = -ENOTBLK;
7407                 }
7408 
7409                 if (ret)
7410                         break;
7411 
7412                 cond_resched();
7413         }
7414 
7415         return ret;
7416 }
7417 
7418 /* The callers of this must take lock_extent() */
7419 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7420                                        u64 orig_start, u64 block_start,
7421                                        u64 block_len, u64 orig_block_len,
7422                                        u64 ram_bytes, int compress_type,
7423                                        int type)
7424 {
7425         struct extent_map_tree *em_tree;
7426         struct extent_map *em;
7427         struct btrfs_root *root = BTRFS_I(inode)->root;
7428         int ret;
7429 
7430         ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7431                type == BTRFS_ORDERED_COMPRESSED ||
7432                type == BTRFS_ORDERED_NOCOW ||
7433                type == BTRFS_ORDERED_REGULAR);
7434 
7435         em_tree = &BTRFS_I(inode)->extent_tree;
7436         em = alloc_extent_map();
7437         if (!em)
7438                 return ERR_PTR(-ENOMEM);
7439 
7440         em->start = start;
7441         em->orig_start = orig_start;
7442         em->len = len;
7443         em->block_len = block_len;
7444         em->block_start = block_start;
7445         em->bdev = root->fs_info->fs_devices->latest_bdev;
7446         em->orig_block_len = orig_block_len;
7447         em->ram_bytes = ram_bytes;
7448         em->generation = -1;
7449         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7450         if (type == BTRFS_ORDERED_PREALLOC) {
7451                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7452         } else if (type == BTRFS_ORDERED_COMPRESSED) {
7453                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);