
TOMOYO Linux Cross Reference
Linux/fs/btrfs/inode.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (C) 2007 Oracle.  All rights reserved.
  4  */
  5 
  6 #include <linux/kernel.h>
  7 #include <linux/bio.h>
  8 #include <linux/buffer_head.h>
  9 #include <linux/file.h>
 10 #include <linux/fs.h>
 11 #include <linux/pagemap.h>
 12 #include <linux/highmem.h>
 13 #include <linux/time.h>
 14 #include <linux/init.h>
 15 #include <linux/string.h>
 16 #include <linux/backing-dev.h>
 17 #include <linux/writeback.h>
 18 #include <linux/compat.h>
 19 #include <linux/xattr.h>
 20 #include <linux/posix_acl.h>
 21 #include <linux/falloc.h>
 22 #include <linux/slab.h>
 23 #include <linux/ratelimit.h>
 24 #include <linux/btrfs.h>
 25 #include <linux/blkdev.h>
 26 #include <linux/posix_acl_xattr.h>
 27 #include <linux/uio.h>
 28 #include <linux/magic.h>
 29 #include <linux/iversion.h>
 30 #include <asm/unaligned.h>
 31 #include "ctree.h"
 32 #include "disk-io.h"
 33 #include "transaction.h"
 34 #include "btrfs_inode.h"
 35 #include "print-tree.h"
 36 #include "ordered-data.h"
 37 #include "xattr.h"
 38 #include "tree-log.h"
 39 #include "volumes.h"
 40 #include "compression.h"
 41 #include "locking.h"
 42 #include "free-space-cache.h"
 43 #include "inode-map.h"
 44 #include "backref.h"
 45 #include "props.h"
 46 #include "qgroup.h"
 47 #include "dedupe.h"
 48 
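/*
 * Lookup arguments: the key and the root that identify an inode during
 * an iget-style lookup.
 */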
 49 struct btrfs_iget_args {
 50         struct btrfs_key *location;
 51         struct btrfs_root *root;
 52 };
 53 
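/*
 * State carried across the direct I/O callbacks: the outstanding space
 * reservation and the range of ordered extents created but not yet
 * submitted.
 */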
 54 struct btrfs_dio_data {
 55         u64 reserve;
 56         u64 unsubmitted_oe_range_start;
 57         u64 unsubmitted_oe_range_end;
 58         int overwrite;
 59 };
 60 
 61 static const struct inode_operations btrfs_dir_inode_operations;
 62 static const struct inode_operations btrfs_symlink_inode_operations;
 63 static const struct inode_operations btrfs_dir_ro_inode_operations;
 64 static const struct inode_operations btrfs_special_inode_operations;
 65 static const struct inode_operations btrfs_file_inode_operations;
 66 static const struct address_space_operations btrfs_aops;
 67 static const struct file_operations btrfs_dir_file_operations;
 68 static const struct extent_io_ops btrfs_extent_io_ops;
 69 
 70 static struct kmem_cache *btrfs_inode_cachep;
 71 struct kmem_cache *btrfs_trans_handle_cachep;
 72 struct kmem_cache *btrfs_path_cachep;
 73 struct kmem_cache *btrfs_free_space_cachep;
 74 
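/* Map (mode & S_IFMT) to the BTRFS_FT_* type stored in directory items. */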
 75 #define S_SHIFT 12
 76 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 77         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
 78         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
 79         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
 80         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
 81         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
 82         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
 83         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
 84 };
 85 
 86 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
 87 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
 88 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 89 static noinline int cow_file_range(struct inode *inode,
 90                                    struct page *locked_page,
 91                                    u64 start, u64 end, u64 delalloc_end,
 92                                    int *page_started, unsigned long *nr_written,
 93                                    int unlock, struct btrfs_dedupe_hash *hash);
 94 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
 95                                        u64 orig_start, u64 block_start,
 96                                        u64 block_len, u64 orig_block_len,
 97                                        u64 ram_bytes, int compress_type,
 98                                        int type);
 99 
100 static void __endio_write_update_ordered(struct inode *inode,
101                                          const u64 offset, const u64 bytes,
102                                          const bool uptodate);
103 
104 /*
105  * Clean up all submitted ordered extents in the specified range to handle
106  * errors from the fill_delalloc() callback.
107  *
108  * NOTE: the caller must ensure that when an error happens, it does not call
109  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
110  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
111  * to be released, which we want to happen only when finishing the ordered
112  * extent (btrfs_finish_ordered_io()). Also note that the caller of the
113  * fill_delalloc() callback already does proper cleanup for the first page of
114  * the range, that is, it invokes the callback writepage_end_io_hook() for the
115  * range of the first page.
116  */
117 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
118                                                  const u64 offset,
119                                                  const u64 bytes)
120 {
121         unsigned long index = offset >> PAGE_SHIFT;
122         unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
123         struct page *page;
124 
125         while (index <= end_index) {
126                 page = find_get_page(inode->i_mapping, index);
127                 index++;
128                 if (!page)
129                         continue;
130                 ClearPagePrivate2(page);
131                 put_page(page);
132         }
133         return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
134                                             bytes - PAGE_SIZE, false);
135 }
136 
137 static int btrfs_dirty_inode(struct inode *inode);
138 
139 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
140 void btrfs_test_inode_set_ops(struct inode *inode)
141 {
142         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
143 }
144 #endif
145 
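/*
 * Initialize the security attributes of a new inode: inherit ACLs from
 * the parent directory and set up the security xattr.
 */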
146 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
147                                      struct inode *inode,  struct inode *dir,
148                                      const struct qstr *qstr)
149 {
150         int err;
151 
152         err = btrfs_init_acl(trans, inode, dir);
153         if (!err)
154                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
155         return err;
156 }
157 
158 /*
159  * this does all the hard work for inserting an inline extent into
160  * the btree.  The caller should have called btrfs_drop_extents() so that
161  * no overlapping inline items exist in the btree.
162  */
163 static int insert_inline_extent(struct btrfs_trans_handle *trans,
164                                 struct btrfs_path *path, int extent_inserted,
165                                 struct btrfs_root *root, struct inode *inode,
166                                 u64 start, size_t size, size_t compressed_size,
167                                 int compress_type,
168                                 struct page **compressed_pages)
169 {
170         struct extent_buffer *leaf;
171         struct page *page = NULL;
172         char *kaddr;
173         unsigned long ptr;
174         struct btrfs_file_extent_item *ei;
175         int ret;
176         size_t cur_size = size;
177         unsigned long offset;
178 
179         if (compressed_size && compressed_pages)
180                 cur_size = compressed_size;
181 
182         inode_add_bytes(inode, size);
183 
184         if (!extent_inserted) {
185                 struct btrfs_key key;
186                 size_t datasize;
187 
188                 key.objectid = btrfs_ino(BTRFS_I(inode));
189                 key.offset = start;
190                 key.type = BTRFS_EXTENT_DATA_KEY;
191 
192                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
193                 path->leave_spinning = 1;
194                 ret = btrfs_insert_empty_item(trans, root, path, &key,
195                                               datasize);
196                 if (ret)
197                         goto fail;
198         }
199         leaf = path->nodes[0];
200         ei = btrfs_item_ptr(leaf, path->slots[0],
201                             struct btrfs_file_extent_item);
202         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
203         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
204         btrfs_set_file_extent_encryption(leaf, ei, 0);
205         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
206         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
207         ptr = btrfs_file_extent_inline_start(ei);
208 
209         if (compress_type != BTRFS_COMPRESS_NONE) {
210                 struct page *cpage;
211                 int i = 0;
212                 while (compressed_size > 0) {
213                         cpage = compressed_pages[i];
214                         cur_size = min_t(unsigned long, compressed_size,
215                                        PAGE_SIZE);
216 
217                         kaddr = kmap_atomic(cpage);
218                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
219                         kunmap_atomic(kaddr);
220 
221                         i++;
222                         ptr += cur_size;
223                         compressed_size -= cur_size;
224                 }
225                 btrfs_set_file_extent_compression(leaf, ei,
226                                                   compress_type);
227         } else {
228                 page = find_get_page(inode->i_mapping,
229                                      start >> PAGE_SHIFT);
230                 btrfs_set_file_extent_compression(leaf, ei, 0);
231                 kaddr = kmap_atomic(page);
232                 offset = start & (PAGE_SIZE - 1);
233                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
234                 kunmap_atomic(kaddr);
235                 put_page(page);
236         }
237         btrfs_mark_buffer_dirty(leaf);
238         btrfs_release_path(path);
239 
240         /*
241          * we're an inline extent, so nobody can
242          * extend the file past i_size without locking
243          * a page we already have locked.
244          *
245          * We must do any isize and inode updates
246          * before we unlock the pages.  Otherwise we
247          * could end up racing with unlink.
248          */
249         BTRFS_I(inode)->disk_i_size = inode->i_size;
250         ret = btrfs_update_inode(trans, root, inode);
251 
252 fail:
253         return ret;
254 }
255 
256 
257 /*
258  * conditionally insert an inline extent into the file.  This
259  * does the checks required to make sure the data is small enough
260  * to fit as an inline extent.
261  */
262 static noinline int cow_file_range_inline(struct inode *inode, u64 start,
263                                           u64 end, size_t compressed_size,
264                                           int compress_type,
265                                           struct page **compressed_pages)
266 {
267         struct btrfs_root *root = BTRFS_I(inode)->root;
268         struct btrfs_fs_info *fs_info = root->fs_info;
269         struct btrfs_trans_handle *trans;
270         u64 isize = i_size_read(inode);
271         u64 actual_end = min(end + 1, isize);
272         u64 inline_len = actual_end - start;
273         u64 aligned_end = ALIGN(end, fs_info->sectorsize);
274         u64 data_len = inline_len;
275         int ret;
276         struct btrfs_path *path;
277         int extent_inserted = 0;
278         u32 extent_item_size;
279 
280         if (compressed_size)
281                 data_len = compressed_size;
282 
283         if (start > 0 ||
284             actual_end > fs_info->sectorsize ||
285             data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
286             (!compressed_size &&
287             (actual_end & (fs_info->sectorsize - 1)) == 0) ||
288             end + 1 < isize ||
289             data_len > fs_info->max_inline) {
290                 return 1;
291         }
292 
293         path = btrfs_alloc_path();
294         if (!path)
295                 return -ENOMEM;
296 
297         trans = btrfs_join_transaction(root);
298         if (IS_ERR(trans)) {
299                 btrfs_free_path(path);
300                 return PTR_ERR(trans);
301         }
302         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
303 
304         if (compressed_size && compressed_pages)
305                 extent_item_size = btrfs_file_extent_calc_inline_size(
306                    compressed_size);
307         else
308                 extent_item_size = btrfs_file_extent_calc_inline_size(
309                     inline_len);
310 
311         ret = __btrfs_drop_extents(trans, root, inode, path,
312                                    start, aligned_end, NULL,
313                                    1, 1, extent_item_size, &extent_inserted);
314         if (ret) {
315                 btrfs_abort_transaction(trans, ret);
316                 goto out;
317         }
318 
319         if (isize > actual_end)
320                 inline_len = min_t(u64, isize, actual_end);
321         ret = insert_inline_extent(trans, path, extent_inserted,
322                                    root, inode, start,
323                                    inline_len, compressed_size,
324                                    compress_type, compressed_pages);
325         if (ret && ret != -ENOSPC) {
326                 btrfs_abort_transaction(trans, ret);
327                 goto out;
328         } else if (ret == -ENOSPC) {
329                 ret = 1;
330                 goto out;
331         }
332 
333         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
334         btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
335 out:
336         /*
337          * Don't forget to free the reserved space: an inline extent
338          * won't be counted as a data extent, so free it directly here.
339          * At reserve time the space is always aligned to the page size,
340          * so just free one page here.
341          */
342         btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
343         btrfs_free_path(path);
344         btrfs_end_transaction(trans);
345         return ret;
346 }
347 
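/*
 * One range produced by compress_file_range(): either a set of compressed
 * pages or, with pages == NULL, an uncompressed fallback range.  These are
 * queued on the async_cow list and written out by
 * submit_compressed_extents().
 */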
348 struct async_extent {
349         u64 start;
350         u64 ram_size;
351         u64 compressed_size;
352         struct page **pages;
353         unsigned long nr_pages;
354         int compress_type;
355         struct list_head list;
356 };
357 
358 struct async_cow {
359         struct inode *inode;
360         struct btrfs_root *root;
361         struct page *locked_page;
362         u64 start;
363         u64 end;
364         unsigned int write_flags;
365         struct list_head extents;
366         struct btrfs_work work;
367 };
368 
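/* Record a range on the async_cow list for the submission phase. */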
369 static noinline int add_async_extent(struct async_cow *cow,
370                                      u64 start, u64 ram_size,
371                                      u64 compressed_size,
372                                      struct page **pages,
373                                      unsigned long nr_pages,
374                                      int compress_type)
375 {
376         struct async_extent *async_extent;
377 
378         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
379         BUG_ON(!async_extent); /* -ENOMEM */
380         async_extent->start = start;
381         async_extent->ram_size = ram_size;
382         async_extent->compressed_size = compressed_size;
383         async_extent->pages = pages;
384         async_extent->nr_pages = nr_pages;
385         async_extent->compress_type = compress_type;
386         list_add_tail(&async_extent->list, &cow->extents);
387         return 0;
388 }
389 
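/*
 * Decide whether a range of this inode should be compressed, honoring
 * mount options, the per-inode flags and the compression heuristic.
 */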
390 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
391 {
392         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
393 
394         /* force compress */
395         if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
396                 return 1;
397         /* defrag ioctl */
398         if (BTRFS_I(inode)->defrag_compress)
399                 return 1;
400         /* bad compression ratios */
401         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
402                 return 0;
403         if (btrfs_test_opt(fs_info, COMPRESS) ||
404             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
405             BTRFS_I(inode)->prop_compress)
406                 return btrfs_compress_heuristic(inode, start, end);
407         return 0;
408 }
409 
410 static inline void inode_should_defrag(struct btrfs_inode *inode,
411                 u64 start, u64 end, u64 num_bytes, u64 small_write)
412 {
413         /* If this is a small write inside eof, kick off a defrag */
414         if (num_bytes < small_write &&
415             (start > 0 || end + 1 < inode->disk_i_size))
416                 btrfs_add_inode_defrag(NULL, inode);
417 }
418 
419 /*
420  * we create compressed extents in two phases.  The first
421  * phase compresses a range of pages that have already been
422  * locked (both pages and state bits are locked).
423  *
424  * This is done inside an ordered work queue, and the compression
425  * is spread across many cpus.  The actual IO submission is step
426  * two, and the ordered work queue takes care of making sure that
427  * happens in the same order things were put onto the queue by
428  * writepages and friends.
429  *
430  * If this code finds it can't get good compression, it puts an
431  * entry onto the work queue to write the uncompressed bytes.  This
432  * makes sure that both compressed inodes and uncompressed inodes
433  * are written in the same order that the flusher thread sent them
434  * down.
435  */
436 static noinline void compress_file_range(struct inode *inode,
437                                         struct page *locked_page,
438                                         u64 start, u64 end,
439                                         struct async_cow *async_cow,
440                                         int *num_added)
441 {
442         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
443         u64 blocksize = fs_info->sectorsize;
444         u64 actual_end;
445         u64 isize = i_size_read(inode);
446         int ret = 0;
447         struct page **pages = NULL;
448         unsigned long nr_pages;
449         unsigned long total_compressed = 0;
450         unsigned long total_in = 0;
451         int i;
452         int will_compress;
453         int compress_type = fs_info->compress_type;
454         int redirty = 0;
455 
456         inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
457                         SZ_16K);
458 
459         actual_end = min_t(u64, isize, end + 1);
460 again:
461         will_compress = 0;
462         nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
463         BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
464         nr_pages = min_t(unsigned long, nr_pages,
465                         BTRFS_MAX_COMPRESSED / PAGE_SIZE);
466 
467         /*
468          * we don't want to send crud past the end of i_size through
469          * compression, that's just a waste of CPU time.  So, if the
470          * end of the file is before the start of our current
471          * requested range of bytes, we bail out to the uncompressed
472          * cleanup code that can deal with all of this.
473          *
474          * It isn't really the fastest way to fix things, but this is a
475          * very uncommon corner.
476          */
477         if (actual_end <= start)
478                 goto cleanup_and_bail_uncompressed;
479 
480         total_compressed = actual_end - start;
481 
482         /*
483          * skip compression for a small file range (<= blocksize) that
484          * isn't an inline extent, since it doesn't save disk space at all.
485          */
486         if (total_compressed <= blocksize &&
487            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
488                 goto cleanup_and_bail_uncompressed;
489 
490         total_compressed = min_t(unsigned long, total_compressed,
491                         BTRFS_MAX_UNCOMPRESSED);
492         total_in = 0;
493         ret = 0;
494 
495         /*
496          * we do compression for mount -o compress and when the
497          * inode has not been flagged as nocompress.  This flag can
498          * change at any time if we discover bad compression ratios.
499          */
500         if (inode_need_compress(inode, start, end)) {
501                 WARN_ON(pages);
502                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
503                 if (!pages) {
504                         /* just bail out to the uncompressed code */
505                         nr_pages = 0;
506                         goto cont;
507                 }
508 
509                 if (BTRFS_I(inode)->defrag_compress)
510                         compress_type = BTRFS_I(inode)->defrag_compress;
511                 else if (BTRFS_I(inode)->prop_compress)
512                         compress_type = BTRFS_I(inode)->prop_compress;
513 
514                 /*
515                  * we need to call clear_page_dirty_for_io on each
516                  * page in the range.  Otherwise applications with the file
517                  * mmap'd can wander in and change the page contents while
518                  * we are compressing them.
519                  *
520                  * If the compression fails for any reason, we set the pages
521                  * dirty again later on.
522                  *
523                  * Note that the remaining part is redirtied, the start pointer
524                  * has moved, the end is the original one.
525                  */
526                 if (!redirty) {
527                         extent_range_clear_dirty_for_io(inode, start, end);
528                         redirty = 1;
529                 }
530 
531                 /* Compression level is applied here and only here */
532                 ret = btrfs_compress_pages(
533                         compress_type | (fs_info->compress_level << 4),
534                                            inode->i_mapping, start,
535                                            pages,
536                                            &nr_pages,
537                                            &total_in,
538                                            &total_compressed);
539 
540                 if (!ret) {
541                         unsigned long offset = total_compressed &
542                                 (PAGE_SIZE - 1);
543                         struct page *page = pages[nr_pages - 1];
544                         char *kaddr;
545 
546                         /* zero the tail end of the last page, we might be
547                          * sending it down to disk
548                          */
549                         if (offset) {
550                                 kaddr = kmap_atomic(page);
551                                 memset(kaddr + offset, 0,
552                                        PAGE_SIZE - offset);
553                                 kunmap_atomic(kaddr);
554                         }
555                         will_compress = 1;
556                 }
557         }
558 cont:
559         if (start == 0) {
560                 /* let's try to make an inline extent */
561                 if (ret || total_in < actual_end) {
562                         /* we didn't compress the entire range, try
563                          * to make an uncompressed inline extent.
564                          */
565                         ret = cow_file_range_inline(inode, start, end, 0,
566                                                     BTRFS_COMPRESS_NONE, NULL);
567                 } else {
568                         /* try making a compressed inline extent */
569                         ret = cow_file_range_inline(inode, start, end,
570                                                     total_compressed,
571                                                     compress_type, pages);
572                 }
573                 if (ret <= 0) {
574                         unsigned long clear_flags = EXTENT_DELALLOC |
575                                 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
576                                 EXTENT_DO_ACCOUNTING;
577                         unsigned long page_error_op;
578 
579                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
580 
581                         /*
582                          * inline extent creation worked or returned an error,
583                          * we don't need to create any more async work items.
584                          * Unlock and free up our temp pages.
585                          *
586                          * We use DO_ACCOUNTING here because we need the
587                          * delalloc_release_metadata to be done _after_ we drop
588                          * our outstanding extent for clearing delalloc for this
589                          * range.
590                          */
591                         extent_clear_unlock_delalloc(inode, start, end, end,
592                                                      NULL, clear_flags,
593                                                      PAGE_UNLOCK |
594                                                      PAGE_CLEAR_DIRTY |
595                                                      PAGE_SET_WRITEBACK |
596                                                      page_error_op |
597                                                      PAGE_END_WRITEBACK);
598                         goto free_pages_out;
599                 }
600         }
601 
602         if (will_compress) {
603                 /*
604                  * we aren't doing an inline extent, so round the compressed
605                  * size up to a block size boundary so that the allocator
606                  * does sane things
607                  */
608                 total_compressed = ALIGN(total_compressed, blocksize);
609 
610                 /*
611                  * one last check to make sure the compression is really a
612                  * win: compare the page count read with the blocks on disk;
613                  * compression must free at least one sector's worth of space
614                  */
615                 total_in = ALIGN(total_in, PAGE_SIZE);
616                 if (total_compressed + blocksize <= total_in) {
617                         *num_added += 1;
618 
619                         /*
620                          * The async work queues will take care of doing actual
621                          * allocation on disk for these compressed pages, and
622                          * will submit them to the elevator.
623                          */
624                         add_async_extent(async_cow, start, total_in,
625                                         total_compressed, pages, nr_pages,
626                                         compress_type);
627 
628                         if (start + total_in < end) {
629                                 start += total_in;
630                                 pages = NULL;
631                                 cond_resched();
632                                 goto again;
633                         }
634                         return;
635                 }
636         }
637         if (pages) {
638                 /*
639                  * the compression code ran but failed to make things smaller,
640                  * free any pages it allocated and our page pointer array
641                  */
642                 for (i = 0; i < nr_pages; i++) {
643                         WARN_ON(pages[i]->mapping);
644                         put_page(pages[i]);
645                 }
646                 kfree(pages);
647                 pages = NULL;
648                 total_compressed = 0;
649                 nr_pages = 0;
650 
651                 /* flag the file so we don't compress in the future */
652                 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
653                     !(BTRFS_I(inode)->prop_compress)) {
654                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
655                 }
656         }
657 cleanup_and_bail_uncompressed:
658         /*
659          * No compression, but we still need to write the pages in the file
660          * we've been given so far.  Redirty the locked page if it corresponds
661          * to our extent and set things up for the async work queue to run
662          * cow_file_range to do the normal delalloc dance.
663          */
664         if (page_offset(locked_page) >= start &&
665             page_offset(locked_page) <= end)
666                 __set_page_dirty_nobuffers(locked_page);
667                 /* unlocked later on in the async handlers */
668 
669         if (redirty)
670                 extent_range_redirty_for_io(inode, start, end);
671         add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
672                          BTRFS_COMPRESS_NONE);
673         *num_added += 1;
674 
675         return;
676 
677 free_pages_out:
678         for (i = 0; i < nr_pages; i++) {
679                 WARN_ON(pages[i]->mapping);
680                 put_page(pages[i]);
681         }
682         kfree(pages);
683 }
684 
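/* Drop the page references held by an async_extent and free its array. */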
685 static void free_async_extent_pages(struct async_extent *async_extent)
686 {
687         int i;
688 
689         if (!async_extent->pages)
690                 return;
691 
692         for (i = 0; i < async_extent->nr_pages; i++) {
693                 WARN_ON(async_extent->pages[i]->mapping);
694                 put_page(async_extent->pages[i]);
695         }
696         kfree(async_extent->pages);
697         async_extent->nr_pages = 0;
698         async_extent->pages = NULL;
699 }
700 
701 /*
702  * phase two of compressed writeback.  This is the ordered portion
703  * of the code, which only gets called in the order the work was
704  * queued.  We walk all the async extents created by compress_file_range
705  * and send them down to the disk.
706  */
707 static noinline void submit_compressed_extents(struct inode *inode,
708                                               struct async_cow *async_cow)
709 {
710         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
711         struct async_extent *async_extent;
712         u64 alloc_hint = 0;
713         struct btrfs_key ins;
714         struct extent_map *em;
715         struct btrfs_root *root = BTRFS_I(inode)->root;
716         struct extent_io_tree *io_tree;
717         int ret = 0;
718 
719 again:
720         while (!list_empty(&async_cow->extents)) {
721                 async_extent = list_entry(async_cow->extents.next,
722                                           struct async_extent, list);
723                 list_del(&async_extent->list);
724 
725                 io_tree = &BTRFS_I(inode)->io_tree;
726 
727 retry:
728                 /* did the compression code fall back to uncompressed IO? */
729                 if (!async_extent->pages) {
730                         int page_started = 0;
731                         unsigned long nr_written = 0;
732 
733                         lock_extent(io_tree, async_extent->start,
734                                          async_extent->start +
735                                          async_extent->ram_size - 1);
736 
737                         /* allocate blocks */
738                         ret = cow_file_range(inode, async_cow->locked_page,
739                                              async_extent->start,
740                                              async_extent->start +
741                                              async_extent->ram_size - 1,
742                                              async_extent->start +
743                                              async_extent->ram_size - 1,
744                                              &page_started, &nr_written, 0,
745                                              NULL);
746 
747                         /* JDM XXX */
748 
749                         /*
750                          * if page_started, cow_file_range inserted an
751                          * inline extent and took care of all the unlocking
752                          * and IO for us.  Otherwise, we need to submit
753                          * all those pages down to the drive.
754                          */
755                         if (!page_started && !ret)
756                                 extent_write_locked_range(inode,
757                                                   async_extent->start,
758                                                   async_extent->start +
759                                                   async_extent->ram_size - 1,
760                                                   WB_SYNC_ALL);
761                         else if (ret)
762                                 unlock_page(async_cow->locked_page);
763                         kfree(async_extent);
764                         cond_resched();
765                         continue;
766                 }
767 
768                 lock_extent(io_tree, async_extent->start,
769                             async_extent->start + async_extent->ram_size - 1);
770 
771                 ret = btrfs_reserve_extent(root, async_extent->ram_size,
772                                            async_extent->compressed_size,
773                                            async_extent->compressed_size,
774                                            0, alloc_hint, &ins, 1, 1);
775                 if (ret) {
776                         free_async_extent_pages(async_extent);
777 
778                         if (ret == -ENOSPC) {
779                                 unlock_extent(io_tree, async_extent->start,
780                                               async_extent->start +
781                                               async_extent->ram_size - 1);
782 
783                                 /*
784                                  * we need to redirty the pages if we decide to
785                                  * fall back to uncompressed IO, otherwise we
786                                  * will not submit these pages down to lower
787                                  * layers.
788                                  */
789                                 extent_range_redirty_for_io(inode,
790                                                 async_extent->start,
791                                                 async_extent->start +
792                                                 async_extent->ram_size - 1);
793 
794                                 goto retry;
795                         }
796                         goto out_free;
797                 }
798                 /*
799                  * here we're doing allocation and writeback of the
800                  * compressed pages
801                  */
802                 em = create_io_em(inode, async_extent->start,
803                                   async_extent->ram_size, /* len */
804                                   async_extent->start, /* orig_start */
805                                   ins.objectid, /* block_start */
806                                   ins.offset, /* block_len */
807                                   ins.offset, /* orig_block_len */
808                                   async_extent->ram_size, /* ram_bytes */
809                                   async_extent->compress_type,
810                                   BTRFS_ORDERED_COMPRESSED);
811                 if (IS_ERR(em))
812                         /* ret value is not needed since this is a void function */
813                         goto out_free_reserve;
814                 free_extent_map(em);
815 
816                 ret = btrfs_add_ordered_extent_compress(inode,
817                                                 async_extent->start,
818                                                 ins.objectid,
819                                                 async_extent->ram_size,
820                                                 ins.offset,
821                                                 BTRFS_ORDERED_COMPRESSED,
822                                                 async_extent->compress_type);
823                 if (ret) {
824                         btrfs_drop_extent_cache(BTRFS_I(inode),
825                                                 async_extent->start,
826                                                 async_extent->start +
827                                                 async_extent->ram_size - 1, 0);
828                         goto out_free_reserve;
829                 }
830                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
831 
832                 /*
833                  * clear dirty, set writeback and unlock the pages.
834                  */
835                 extent_clear_unlock_delalloc(inode, async_extent->start,
836                                 async_extent->start +
837                                 async_extent->ram_size - 1,
838                                 async_extent->start +
839                                 async_extent->ram_size - 1,
840                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
841                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
842                                 PAGE_SET_WRITEBACK);
843                 if (btrfs_submit_compressed_write(inode,
844                                     async_extent->start,
845                                     async_extent->ram_size,
846                                     ins.objectid,
847                                     ins.offset, async_extent->pages,
848                                     async_extent->nr_pages,
849                                     async_cow->write_flags)) {
850                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
851                         struct page *p = async_extent->pages[0];
852                         const u64 start = async_extent->start;
853                         const u64 end = start + async_extent->ram_size - 1;
854 
855                         p->mapping = inode->i_mapping;
856                         tree->ops->writepage_end_io_hook(p, start, end,
857                                                          NULL, 0);
858                         p->mapping = NULL;
859                         extent_clear_unlock_delalloc(inode, start, end, end,
860                                                      NULL, 0,
861                                                      PAGE_END_WRITEBACK |
862                                                      PAGE_SET_ERROR);
863                         free_async_extent_pages(async_extent);
864                 }
865                 alloc_hint = ins.objectid + ins.offset;
866                 kfree(async_extent);
867                 cond_resched();
868         }
869         return;
870 out_free_reserve:
871         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
872         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
873 out_free:
874         extent_clear_unlock_delalloc(inode, async_extent->start,
875                                      async_extent->start +
876                                      async_extent->ram_size - 1,
877                                      async_extent->start +
878                                      async_extent->ram_size - 1,
879                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
880                                      EXTENT_DELALLOC_NEW |
881                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
882                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
883                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
884                                      PAGE_SET_ERROR);
885         free_async_extent_pages(async_extent);
886         kfree(async_extent);
887         goto again;
888 }
889 
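/*
 * Look for an extent mapping near @start and use its block start as an
 * allocation hint, falling back to the inode's first mapped block.
 */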
890 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
891                                       u64 num_bytes)
892 {
893         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
894         struct extent_map *em;
895         u64 alloc_hint = 0;
896 
897         read_lock(&em_tree->lock);
898         em = search_extent_mapping(em_tree, start, num_bytes);
899         if (em) {
900                 /*
901                  * if block start isn't an actual block number then find the
902                  * first block in this inode and use that as a hint.  If that
903                  * block is also bogus then just don't worry about it.
904                  */
905                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
906                         free_extent_map(em);
907                         em = search_extent_mapping(em_tree, 0, 0);
908                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
909                                 alloc_hint = em->block_start;
910                         if (em)
911                                 free_extent_map(em);
912                 } else {
913                         alloc_hint = em->block_start;
914                         free_extent_map(em);
915                 }
916         }
917         read_unlock(&em_tree->lock);
918 
919         return alloc_hint;
920 }
921 
922 /*
923  * when extent_io.c finds a delayed allocation range in the file,
924  * the callbacks end up in this code.  The basic idea is to
925  * allocate extents on disk for the range, and create ordered data
926  * structs in RAM to track those extents.
927  *
928  * locked_page is the page that writepage had locked already.  We use
929  * it to make sure we don't do extra locks or unlocks.
930  *
931  * *page_started is set to one if we unlock locked_page and do everything
932  * required to start IO on it.  It may be clean and already done with
933  * IO when we return.
934  */
935 static noinline int cow_file_range(struct inode *inode,
936                                    struct page *locked_page,
937                                    u64 start, u64 end, u64 delalloc_end,
938                                    int *page_started, unsigned long *nr_written,
939                                    int unlock, struct btrfs_dedupe_hash *hash)
940 {
941         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
942         struct btrfs_root *root = BTRFS_I(inode)->root;
943         u64 alloc_hint = 0;
944         u64 num_bytes;
945         unsigned long ram_size;
946         u64 cur_alloc_size = 0;
947         u64 blocksize = fs_info->sectorsize;
948         struct btrfs_key ins;
949         struct extent_map *em;
950         unsigned clear_bits;
951         unsigned long page_ops;
952         bool extent_reserved = false;
953         int ret = 0;
954 
955         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
956                 WARN_ON_ONCE(1);
957                 ret = -EINVAL;
958                 goto out_unlock;
959         }
960 
961         num_bytes = ALIGN(end - start + 1, blocksize);
962         num_bytes = max(blocksize,  num_bytes);
963         ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
964 
965         inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
966 
967         if (start == 0) {
968                 /* let's try to make an inline extent */
969                 ret = cow_file_range_inline(inode, start, end, 0,
970                                             BTRFS_COMPRESS_NONE, NULL);
971                 if (ret == 0) {
972                         /*
973                          * We use DO_ACCOUNTING here because we need the
974                          * delalloc_release_metadata to be run _after_ we drop
975                          * our outstanding extent for clearing delalloc for this
976                          * range.
977                          */
978                         extent_clear_unlock_delalloc(inode, start, end,
979                                      delalloc_end, NULL,
980                                      EXTENT_LOCKED | EXTENT_DELALLOC |
981                                      EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
982                                      EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
983                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
984                                      PAGE_END_WRITEBACK);
985                         *nr_written = *nr_written +
986                              (end - start + PAGE_SIZE) / PAGE_SIZE;
987                         *page_started = 1;
988                         goto out;
989                 } else if (ret < 0) {
990                         goto out_unlock;
991                 }
992         }
993 
994         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
995         btrfs_drop_extent_cache(BTRFS_I(inode), start,
996                         start + num_bytes - 1, 0);
997 
998         while (num_bytes > 0) {
999                 cur_alloc_size = num_bytes;
1000                 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1001                                            fs_info->sectorsize, 0, alloc_hint,
1002                                            &ins, 1, 1);
1003                 if (ret < 0)
1004                         goto out_unlock;
1005                 cur_alloc_size = ins.offset;
1006                 extent_reserved = true;
1007 
1008                 ram_size = ins.offset;
1009                 em = create_io_em(inode, start, ins.offset, /* len */
1010                                   start, /* orig_start */
1011                                   ins.objectid, /* block_start */
1012                                   ins.offset, /* block_len */
1013                                   ins.offset, /* orig_block_len */
1014                                   ram_size, /* ram_bytes */
1015                                   BTRFS_COMPRESS_NONE, /* compress_type */
1016                                   BTRFS_ORDERED_REGULAR /* type */);
1017                 if (IS_ERR(em)) {
1018                         ret = PTR_ERR(em);
1019                         goto out_reserve;
1020                 }
1021                 free_extent_map(em);
1022 
1023                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1024                                                ram_size, cur_alloc_size, 0);
1025                 if (ret)
1026                         goto out_drop_extent_cache;
1027 
1028                 if (root->root_key.objectid ==
1029                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1030                         ret = btrfs_reloc_clone_csums(inode, start,
1031                                                       cur_alloc_size);
1032                         /*
1033                          * Only drop cache here, and process as normal.
1034                          *
1035                          * We must not allow extent_clear_unlock_delalloc()
1036                          * at out_unlock label to free meta of this ordered
1037                          * extent, as its meta should be freed by
1038                          * btrfs_finish_ordered_io().
1039                          *
1040                          * So we must continue until @start is increased to
1041                          * skip current ordered extent.
1042                          */
1043                         if (ret)
1044                                 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1045                                                 start + ram_size - 1, 0);
1046                 }
1047 
1048                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1049 
1050                 /* we're not doing compressed IO, don't unlock the first
1051                  * page (which the caller expects to stay locked), don't
1052                  * clear any dirty bits and don't set any writeback bits
1053                  *
1054                  * Do set the Private2 bit so we know this page was
1055                  * properly set up for writepage
1056                  */
1057                 page_ops = unlock ? PAGE_UNLOCK : 0;
1058                 page_ops |= PAGE_SET_PRIVATE2;
1059 
1060                 extent_clear_unlock_delalloc(inode, start,
1061                                              start + ram_size - 1,
1062                                              delalloc_end, locked_page,
1063                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1064                                              page_ops);
1065                 if (num_bytes < cur_alloc_size)
1066                         num_bytes = 0;
1067                 else
1068                         num_bytes -= cur_alloc_size;
1069                 alloc_hint = ins.objectid + ins.offset;
1070                 start += cur_alloc_size;
1071                 extent_reserved = false;
1072 
1073                 /*
1074                  * btrfs_reloc_clone_csums() error, since start is increased
1075                  * extent_clear_unlock_delalloc() at out_unlock label won't
1076                  * free metadata of current ordered extent, we're OK to exit.
1077                  */
1078                 if (ret)
1079                         goto out_unlock;
1080         }
1081 out:
1082         return ret;
1083 
1084 out_drop_extent_cache:
1085         btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1086 out_reserve:
1087         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1088         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1089 out_unlock:
1090         clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1091                 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1092         page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1093                 PAGE_END_WRITEBACK;
1094         /*
1095          * If we reserved an extent for our delalloc range (or a subrange) and
1096          * failed to create the respective ordered extent, then it means that
1097          * when we reserved the extent we decremented the extent's size from
1098          * the data space_info's bytes_may_use counter and incremented the
1099          * space_info's bytes_reserved counter by the same amount. We must make
1100          * sure extent_clear_unlock_delalloc() does not try to decrement again
1101          * the data space_info's bytes_may_use counter, therefore we do not pass
1102          * it the flag EXTENT_CLEAR_DATA_RESV.
1103          */
1104         if (extent_reserved) {
1105                 extent_clear_unlock_delalloc(inode, start,
1106                                              start + cur_alloc_size,
1107                                              start + cur_alloc_size,
1108                                              locked_page,
1109                                              clear_bits,
1110                                              page_ops);
1111                 start += cur_alloc_size;
1112                 if (start >= end)
1113                         goto out;
1114         }
1115         extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1116                                      locked_page,
1117                                      clear_bits | EXTENT_CLEAR_DATA_RESV,
1118                                      page_ops);
1119         goto out;
1120 }
1121 
1122 /*
1123  * work queue callback to start compression on a file and its pages
1124  */
1125 static noinline void async_cow_start(struct btrfs_work *work)
1126 {
1127         struct async_cow *async_cow;
1128         int num_added = 0;
1129         async_cow = container_of(work, struct async_cow, work);
1130 
1131         compress_file_range(async_cow->inode, async_cow->locked_page,
1132                             async_cow->start, async_cow->end, async_cow,
1133                             &num_added);
1134         if (num_added == 0) {
1135                 btrfs_add_delayed_iput(async_cow->inode);
1136                 async_cow->inode = NULL;
1137         }
1138 }
1139 
1140 /*
1141  * work queue callback to submit previously compressed pages
1142  */
1143 static noinline void async_cow_submit(struct btrfs_work *work)
1144 {
1145         struct btrfs_fs_info *fs_info;
1146         struct async_cow *async_cow;
1147         struct btrfs_root *root;
1148         unsigned long nr_pages;
1149 
1150         async_cow = container_of(work, struct async_cow, work);
1151 
1152         root = async_cow->root;
1153         fs_info = root->fs_info;
1154         nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1155                 PAGE_SHIFT;
1156 
1157         /* atomic_sub_return implies a barrier */
1158         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1159             5 * SZ_1M)
1160                 cond_wake_up_nomb(&fs_info->async_submit_wait);
1161 
1162         if (async_cow->inode)
1163                 submit_compressed_extents(async_cow->inode, async_cow);
1164 }
1165 
1166 static noinline void async_cow_free(struct btrfs_work *work)
1167 {
1168         struct async_cow *async_cow;
1169         async_cow = container_of(work, struct async_cow, work);
1170         if (async_cow->inode)
1171                 btrfs_add_delayed_iput(async_cow->inode);
1172         kfree(async_cow);
1173 }
1174 
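/*
 * Split the delalloc range into chunks (at most 512K each when compression
 * may be used) and queue each chunk as an async_cow work item.  Compression
 * happens in async_cow_start(), submission in async_cow_submit().
 */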
1175 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1176                                 u64 start, u64 end, int *page_started,
1177                                 unsigned long *nr_written,
1178                                 unsigned int write_flags)
1179 {
1180         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1181         struct async_cow *async_cow;
1182         struct btrfs_root *root = BTRFS_I(inode)->root;
1183         unsigned long nr_pages;
1184         u64 cur_end;
1185 
1186         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1187                          1, 0, NULL);
1188         while (start < end) {
1189                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1190                 BUG_ON(!async_cow); /* -ENOMEM */
1191                 async_cow->inode = igrab(inode);
1192                 async_cow->root = root;
1193                 async_cow->locked_page = locked_page;
1194                 async_cow->start = start;
1195                 async_cow->write_flags = write_flags;
1196 
1197                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1198                     !btrfs_test_opt(fs_info, FORCE_COMPRESS))
1199                         cur_end = end;
1200                 else
1201                         cur_end = min(end, start + SZ_512K - 1);
1202 
1203                 async_cow->end = cur_end;
1204                 INIT_LIST_HEAD(&async_cow->extents);
1205 
1206                 btrfs_init_work(&async_cow->work,
1207                                 btrfs_delalloc_helper,
1208                                 async_cow_start, async_cow_submit,
1209                                 async_cow_free);
1210 
1211                 nr_pages = (cur_end - start + PAGE_SIZE) >>
1212                         PAGE_SHIFT;
1213                 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1214 
1215                 btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
1216 
1217                 *nr_written += nr_pages;
1218                 start = cur_end + 1;
1219         }
1220         *page_started = 1;
1221         return 0;
1222 }
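/*
 * A condensed sketch of the chunking policy above (illustrative, not part
 * of btrfs): compressible ranges are cut into SZ_512K pieces so several
 * async_cow work items can compress in parallel, while an inode marked
 * NOCOMPRESS (without the force-compress mount option) gets the whole
 * range as a single chunk.
 */
static inline u64 sketch_chunk_end(u64 start, u64 end, bool compressible)
{
        /* returns the last byte of the current chunk, inclusive */
        return compressible ? min(end, start + SZ_512K - 1) : end;
}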
1223 
1224 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1225                                         u64 bytenr, u64 num_bytes)
1226 {
1227         int ret;
1228         struct btrfs_ordered_sum *sums;
1229         LIST_HEAD(list);
1230 
1231         ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1232                                        bytenr + num_bytes - 1, &list, 0);
1233         if (ret == 0 && list_empty(&list))
1234                 return 0;
1235 
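        /*
         * The lookup may have queued partial results on the list before
         * failing, so drain and free the sums unconditionally before
         * acting on a negative return value.
         */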
1236         while (!list_empty(&list)) {
1237                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1238                 list_del(&sums->list);
1239                 kfree(sums);
1240         }
1241         if (ret < 0)
1242                 return ret;
1243         return 1;
1244 }
1245 
1246 /*
1247  * Called for the nocow writeback path.  This checks for snapshots or COW copies
1248  * of the extents that exist in the file, and COWs the file as required.
1249  *
1250  * If no cow copies or snapshots exist, we write directly to the existing
1251  * blocks on disk
1252  */
1253 static noinline int run_delalloc_nocow(struct inode *inode,
1254                                        struct page *locked_page,
1255                               u64 start, u64 end, int *page_started, int force,
1256                               unsigned long *nr_written)
1257 {
1258         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1259         struct btrfs_root *root = BTRFS_I(inode)->root;
1260         struct extent_buffer *leaf;
1261         struct btrfs_path *path;
1262         struct btrfs_file_extent_item *fi;
1263         struct btrfs_key found_key;
1264         struct extent_map *em;
1265         u64 cow_start;
1266         u64 cur_offset;
1267         u64 extent_end;
1268         u64 extent_offset;
1269         u64 disk_bytenr;
1270         u64 num_bytes;
1271         u64 disk_num_bytes;
1272         u64 ram_bytes;
1273         int extent_type;
1274         int ret;
1275         int type;
1276         int nocow;
1277         int check_prev = 1;
1278         bool nolock;
1279         u64 ino = btrfs_ino(BTRFS_I(inode));
1280 
1281         path = btrfs_alloc_path();
1282         if (!path) {
1283                 extent_clear_unlock_delalloc(inode, start, end, end,
1284                                              locked_page,
1285                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1286                                              EXTENT_DO_ACCOUNTING |
1287                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1288                                              PAGE_CLEAR_DIRTY |
1289                                              PAGE_SET_WRITEBACK |
1290                                              PAGE_END_WRITEBACK);
1291                 return -ENOMEM;
1292         }
1293 
1294         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
1295 
1296         cow_start = (u64)-1;
1297         cur_offset = start;
1298         while (1) {
1299                 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1300                                                cur_offset, 0);
1301                 if (ret < 0)
1302                         goto error;
1303                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1304                         leaf = path->nodes[0];
1305                         btrfs_item_key_to_cpu(leaf, &found_key,
1306                                               path->slots[0] - 1);
1307                         if (found_key.objectid == ino &&
1308                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1309                                 path->slots[0]--;
1310                 }
1311                 check_prev = 0;
1312 next_slot:
1313                 leaf = path->nodes[0];
1314                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1315                         ret = btrfs_next_leaf(root, path);
1316                         if (ret < 0) {
1317                                 if (cow_start != (u64)-1)
1318                                         cur_offset = cow_start;
1319                                 goto error;
1320                         }
1321                         if (ret > 0)
1322                                 break;
1323                         leaf = path->nodes[0];
1324                 }
1325 
1326                 nocow = 0;
1327                 disk_bytenr = 0;
1328                 num_bytes = 0;
1329                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1330 
1331                 if (found_key.objectid > ino)
1332                         break;
1333                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1334                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1335                         path->slots[0]++;
1336                         goto next_slot;
1337                 }
1338                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1339                     found_key.offset > end)
1340                         break;
1341 
1342                 if (found_key.offset > cur_offset) {
1343                         extent_end = found_key.offset;
1344                         extent_type = 0;
1345                         goto out_check;
1346                 }
1347 
1348                 fi = btrfs_item_ptr(leaf, path->slots[0],
1349                                     struct btrfs_file_extent_item);
1350                 extent_type = btrfs_file_extent_type(leaf, fi);
1351 
1352                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1353                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1354                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1355                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1356                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1357                         extent_end = found_key.offset +
1358                                 btrfs_file_extent_num_bytes(leaf, fi);
1359                         disk_num_bytes =
1360                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1361                         if (extent_end <= start) {
1362                                 path->slots[0]++;
1363                                 goto next_slot;
1364                         }
1365                         if (disk_bytenr == 0)
1366                                 goto out_check;
1367                         if (btrfs_file_extent_compression(leaf, fi) ||
1368                             btrfs_file_extent_encryption(leaf, fi) ||
1369                             btrfs_file_extent_other_encoding(leaf, fi))
1370                                 goto out_check;
1371                         /*
1372                          * Do the same check as in btrfs_cross_ref_exist but
1373                          * without the unnecessary search.
1374                          */
1375                         if (!nolock &&
1376                             btrfs_file_extent_generation(leaf, fi) <=
1377                             btrfs_root_last_snapshot(&root->root_item))
1378                                 goto out_check;
1379                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1380                                 goto out_check;
1381                         if (btrfs_extent_readonly(fs_info, disk_bytenr))
1382                                 goto out_check;
1383                         ret = btrfs_cross_ref_exist(root, ino,
1384                                                     found_key.offset -
1385                                                     extent_offset, disk_bytenr);
1386                         if (ret) {
1387                                 /*
1388                                  * ret could be -EIO if the above fails to read
1389                                  * metadata.
1390                                  */
1391                                 if (ret < 0) {
1392                                         if (cow_start != (u64)-1)
1393                                                 cur_offset = cow_start;
1394                                         goto error;
1395                                 }
1396 
1397                                 WARN_ON_ONCE(nolock);
1398                                 goto out_check;
1399                         }
1400                         disk_bytenr += extent_offset;
1401                         disk_bytenr += cur_offset - found_key.offset;
1402                         num_bytes = min(end + 1, extent_end) - cur_offset;
1403                         /*
1404                          * If there are pending snapshots for this root,
1405                          * we fall back to the common COW path.
1406                          */
1407                         if (!nolock && atomic_read(&root->snapshot_force_cow))
1408                                 goto out_check;
1409                         /*
1410                          * Force COW if a csum exists in the range.
1411                          * This ensures that the csums for a given extent
1412                          * are either valid or do not exist.
1413                          */
1414                         ret = csum_exist_in_range(fs_info, disk_bytenr,
1415                                                   num_bytes);
1416                         if (ret) {
1417                                 /*
1418                                  * ret could be -EIO if the above fails to read
1419                                  * metadata.
1420                                  */
1421                                 if (ret < 0) {
1422                                         if (cow_start != (u64)-1)
1423                                                 cur_offset = cow_start;
1424                                         goto error;
1425                                 }
1426                                 WARN_ON_ONCE(nolock);
1427                                 goto out_check;
1428                         }
1429                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1430                                 goto out_check;
1431                         nocow = 1;
1432                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1433                         extent_end = found_key.offset +
1434                                 btrfs_file_extent_ram_bytes(leaf, fi);
1435                         extent_end = ALIGN(extent_end,
1436                                            fs_info->sectorsize);
1437                 } else {
1438                         BUG_ON(1);
1439                 }
1440 out_check:
1441                 if (extent_end <= start) {
1442                         path->slots[0]++;
1443                         if (nocow)
1444                                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1445                         goto next_slot;
1446                 }
1447                 if (!nocow) {
1448                         if (cow_start == (u64)-1)
1449                                 cow_start = cur_offset;
1450                         cur_offset = extent_end;
1451                         if (cur_offset > end)
1452                                 break;
1453                         path->slots[0]++;
1454                         goto next_slot;
1455                 }
1456 
1457                 btrfs_release_path(path);
1458                 if (cow_start != (u64)-1) {
1459                         ret = cow_file_range(inode, locked_page,
1460                                              cow_start, found_key.offset - 1,
1461                                              end, page_started, nr_written, 1,
1462                                              NULL);
1463                         if (ret) {
1464                                 if (nocow)
1465                                         btrfs_dec_nocow_writers(fs_info,
1466                                                                 disk_bytenr);
1467                                 goto error;
1468                         }
1469                         cow_start = (u64)-1;
1470                 }
1471 
1472                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1473                         u64 orig_start = found_key.offset - extent_offset;
1474 
1475                         em = create_io_em(inode, cur_offset, num_bytes,
1476                                           orig_start,
1477                                           disk_bytenr, /* block_start */
1478                                           num_bytes, /* block_len */
1479                                           disk_num_bytes, /* orig_block_len */
1480                                           ram_bytes, BTRFS_COMPRESS_NONE,
1481                                           BTRFS_ORDERED_PREALLOC);
1482                         if (IS_ERR(em)) {
1483                                 if (nocow)
1484                                         btrfs_dec_nocow_writers(fs_info,
1485                                                                 disk_bytenr);
1486                                 ret = PTR_ERR(em);
1487                                 goto error;
1488                         }
1489                         free_extent_map(em);
1490                 }
1491 
1492                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1493                         type = BTRFS_ORDERED_PREALLOC;
1494                 } else {
1495                         type = BTRFS_ORDERED_NOCOW;
1496                 }
1497 
1498                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1499                                                num_bytes, num_bytes, type);
1500                 if (nocow)
1501                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1502                 BUG_ON(ret); /* -ENOMEM */
1503 
1504                 if (root->root_key.objectid ==
1505                     BTRFS_DATA_RELOC_TREE_OBJECTID)
1506                         /*
1507                          * Error handled later, as we must prevent
1508                          * extent_clear_unlock_delalloc() in error handler
1509                          * from freeing metadata of created ordered extent.
1510                          */
1511                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1512                                                       num_bytes);
1513 
1514                 extent_clear_unlock_delalloc(inode, cur_offset,
1515                                              cur_offset + num_bytes - 1, end,
1516                                              locked_page, EXTENT_LOCKED |
1517                                              EXTENT_DELALLOC |
1518                                              EXTENT_CLEAR_DATA_RESV,
1519                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1520 
1521                 cur_offset = extent_end;
1522 
1523                 /*
1524                  * If btrfs_reloc_clone_csums() failed, we are now OK to call the
1525                  * error handler, as the metadata for the created ordered extent
1526                  * will only be freed by btrfs_finish_ordered_io().
1527                  */
1528                 if (ret)
1529                         goto error;
1530                 if (cur_offset > end)
1531                         break;
1532         }
1533         btrfs_release_path(path);
1534 
1535         if (cur_offset <= end && cow_start == (u64)-1)
1536                 cow_start = cur_offset;
1537 
1538         if (cow_start != (u64)-1) {
1539                 cur_offset = end;
1540                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1541                                      page_started, nr_written, 1, NULL);
1542                 if (ret)
1543                         goto error;
1544         }
1545 
1546 error:
1547         if (ret && cur_offset < end)
1548                 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1549                                              locked_page, EXTENT_LOCKED |
1550                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1551                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1552                                              PAGE_CLEAR_DIRTY |
1553                                              PAGE_SET_WRITEBACK |
1554                                              PAGE_END_WRITEBACK);
1555         btrfs_free_path(path);
1556         return ret;
1557 }
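/*
 * A condensed checklist of the nocow test above (a sketch, not exhaustive):
 * an extent may be overwritten in place only if it is REG or PREALLOC, is
 * not compressed, encrypted or otherwise encoded, was written after the
 * root's last snapshot, is not shared or readonly, has no csums in the
 * range, no snapshot is pending, and a nocow writer reference can be
 * taken on it.  Anything else falls back to cow_file_range().
 */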
1558 
1559 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1560 {
1561 
1562         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1563             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1564                 return 0;
1565 
1566         /*
1567          * @defrag_bytes is a hint value; no spinlock is held here.
1568          * If it is not zero, it means the file is being defragged.
1569          * Force COW if the given extent needs to be defragged.
1570          */
1571         if (BTRFS_I(inode)->defrag_bytes &&
1572             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1573                            EXTENT_DEFRAG, 0, NULL))
1574                 return 1;
1575 
1576         return 0;
1577 }
1578 
1579 /*
1580  * extent_io.c callback to do delayed allocation processing
1581  */
1582 static int run_delalloc_range(void *private_data, struct page *locked_page,
1583                               u64 start, u64 end, int *page_started,
1584                               unsigned long *nr_written,
1585                               struct writeback_control *wbc)
1586 {
1587         struct inode *inode = private_data;
1588         int ret;
1589         int force_cow = need_force_cow(inode, start, end);
1590         unsigned int write_flags = wbc_to_write_flags(wbc);
1591 
1592         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1593                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1594                                          page_started, 1, nr_written);
1595         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1596                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1597                                          page_started, 0, nr_written);
1598         } else if (!inode_need_compress(inode, start, end)) {
1599                 ret = cow_file_range(inode, locked_page, start, end, end,
1600                                       page_started, nr_written, 1, NULL);
1601         } else {
1602                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1603                         &BTRFS_I(inode)->runtime_flags);
1604                 ret = cow_file_range_async(inode, locked_page, start, end,
1605                                            page_started, nr_written,
1606                                            write_flags);
1607         }
1608         if (ret)
1609                 btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
1610         return ret;
1611 }
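/*
 * The dispatch above, rewritten as a pure decision function.  This is a
 * sketch for illustration only (the names are not btrfs API); the real
 * code inlines the logic directly in run_delalloc_range().
 */
enum sketch_delalloc_path {
        SKETCH_PATH_NOCOW,      /* overwrite existing extents in place */
        SKETCH_PATH_PREALLOC,   /* fill preallocated extents in place */
        SKETCH_PATH_COW,        /* plain COW, no compression */
        SKETCH_PATH_ASYNC,      /* COW with async compression */
};

static enum sketch_delalloc_path sketch_pick_path(unsigned int flags,
                                                  int force_cow,
                                                  int need_compress)
{
        if ((flags & BTRFS_INODE_NODATACOW) && !force_cow)
                return SKETCH_PATH_NOCOW;
        if ((flags & BTRFS_INODE_PREALLOC) && !force_cow)
                return SKETCH_PATH_PREALLOC;
        if (!need_compress)
                return SKETCH_PATH_COW;
        return SKETCH_PATH_ASYNC;
}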
1612 
1613 static void btrfs_split_extent_hook(void *private_data,
1614                                     struct extent_state *orig, u64 split)
1615 {
1616         struct inode *inode = private_data;
1617         u64 size;
1618 
1619         /* not delalloc, ignore it */
1620         if (!(orig->state & EXTENT_DELALLOC))
1621                 return;
1622 
1623         size = orig->end - orig->start + 1;
1624         if (size > BTRFS_MAX_EXTENT_SIZE) {
1625                 u32 num_extents;
1626                 u64 new_size;
1627 
1628                 /*
1629                  * See the explanation in btrfs_merge_extent_hook, the same
1630                  * applies here, just in reverse.
1631                  */
1632                 new_size = orig->end - split + 1;
1633                 num_extents = count_max_extents(new_size);
1634                 new_size = split - orig->start;
1635                 num_extents += count_max_extents(new_size);
1636                 if (count_max_extents(size) >= num_extents)
1637                         return;
1638         }
1639 
1640         spin_lock(&BTRFS_I(inode)->lock);
1641         btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1642         spin_unlock(&BTRFS_I(inode)->lock);
1643 }
1644 
1645 /*
1646  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1647  * extents.  This lets us keep track of new extents that are merged onto old
1648  * extents, such as when we are doing sequential writes, so we can properly
1649  * account for the metadata space we'll need.
1650  */
1651 static void btrfs_merge_extent_hook(void *private_data,
1652                                     struct extent_state *new,
1653                                     struct extent_state *other)
1654 {
1655         struct inode *inode = private_data;
1656         u64 new_size, old_size;
1657         u32 num_extents;
1658 
1659         /* not delalloc, ignore it */
1660         if (!(other->state & EXTENT_DELALLOC))
1661                 return;
1662 
1663         if (new->start > other->start)
1664                 new_size = new->end - other->start + 1;
1665         else
1666                 new_size = other->end - new->start + 1;
1667 
1668         /* we're not bigger than the max, unreserve the space and go */
1669         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1670                 spin_lock(&BTRFS_I(inode)->lock);
1671                 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1672                 spin_unlock(&BTRFS_I(inode)->lock);
1673                 return;
1674         }
1675 
1676         /*
1677          * We have to add up either side to figure out how many extents were
1678          * accounted for before we merged into one big extent.  If the number of
1679          * extents we accounted for is <= the amount we need for the new range
1680          * then we can return, otherwise drop.  Think of it like this
1681          *
1682          * [ 4k][MAX_SIZE]
1683          *
1684          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1685          * need 2 outstanding extents, on one side we have 1 and the other side
1686          * we have 1 so they are == and we can return.  But in this case
1687          *
1688          * [MAX_SIZE+4k][MAX_SIZE+4k]
1689          *
1690          * Each range on their own accounts for 2 extents, but merged together
1691          * they are only 3 extents worth of accounting, so we need to drop in
1692          * this case.
1693          */
1694         old_size = other->end - other->start + 1;
1695         num_extents = count_max_extents(old_size);
1696         old_size = new->end - new->start + 1;
1697         num_extents += count_max_extents(old_size);
1698         if (count_max_extents(new_size) >= num_extents)
1699                 return;
1700 
1701         spin_lock(&BTRFS_I(inode)->lock);
1702         btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1703         spin_unlock(&BTRFS_I(inode)->lock);
1704 }
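/*
 * count_max_extents() used above is a ceiling division by the largest
 * extent btrfs will create, i.e. DIV_ROUND_UP(len, BTRFS_MAX_EXTENT_SIZE).
 * A worked example of the merge accounting, assuming a 128M max extent
 * size (sketch only):
 *
 *   merging [0, 128M+4K) with [128M+4K, 256M+8K):
 *     before: count_max_extents(128M+4K) == 2 per side, so 4 reserved
 *     after:  count_max_extents(256M+8K) == 3 needed
 *   3 < 4, so exactly one outstanding extent is dropped above.
 */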
1705 
1706 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1707                                       struct inode *inode)
1708 {
1709         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1710 
1711         spin_lock(&root->delalloc_lock);
1712         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1713                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1714                               &root->delalloc_inodes);
1715                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1716                         &BTRFS_I(inode)->runtime_flags);
1717                 root->nr_delalloc_inodes++;
1718                 if (root->nr_delalloc_inodes == 1) {
1719                         spin_lock(&fs_info->delalloc_root_lock);
1720                         BUG_ON(!list_empty(&root->delalloc_root));
1721                         list_add_tail(&root->delalloc_root,
1722                                       &fs_info->delalloc_roots);
1723                         spin_unlock(&fs_info->delalloc_root_lock);
1724                 }
1725         }
1726         spin_unlock(&root->delalloc_lock);
1727 }
1728 
1729 
1730 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1731                                 struct btrfs_inode *inode)
1732 {
1733         struct btrfs_fs_info *fs_info = root->fs_info;
1734 
1735         if (!list_empty(&inode->delalloc_inodes)) {
1736                 list_del_init(&inode->delalloc_inodes);
1737                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1738                           &inode->runtime_flags);
1739                 root->nr_delalloc_inodes--;
1740                 if (!root->nr_delalloc_inodes) {
1741                         ASSERT(list_empty(&root->delalloc_inodes));
1742                         spin_lock(&fs_info->delalloc_root_lock);
1743                         BUG_ON(list_empty(&root->delalloc_root));
1744                         list_del_init(&root->delalloc_root);
1745                         spin_unlock(&fs_info->delalloc_root_lock);
1746                 }
1747         }
1748 }
1749 
1750 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1751                                      struct btrfs_inode *inode)
1752 {
1753         spin_lock(&root->delalloc_lock);
1754         __btrfs_del_delalloc_inode(root, inode);
1755         spin_unlock(&root->delalloc_lock);
1756 }
1757 
1758 /*
1759  * extent_io.c set_bit_hook, used to track delayed allocation
1760  * bytes in this file, and to maintain the list of inodes that
1761  * have pending delalloc work to be done.
1762  */
1763 static void btrfs_set_bit_hook(void *private_data,
1764                                struct extent_state *state, unsigned *bits)
1765 {
1766         struct inode *inode = private_data;
1767 
1768         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1769 
1770         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1771                 WARN_ON(1);
1772         /*
1773          * set_bit and clear bit hooks normally require _irqsave/restore
1774          * but in this case, we are only testing for the DELALLOC
1775          * bit, which is only set or cleared with irqs on
1776          */
1777         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1778                 struct btrfs_root *root = BTRFS_I(inode)->root;
1779                 u64 len = state->end + 1 - state->start;
1780                 u32 num_extents = count_max_extents(len);
1781                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1782 
1783                 spin_lock(&BTRFS_I(inode)->lock);
1784                 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1785                 spin_unlock(&BTRFS_I(inode)->lock);
1786 
1787                 /* For sanity tests */
1788                 if (btrfs_is_testing(fs_info))
1789                         return;
1790 
1791                 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1792                                          fs_info->delalloc_batch);
1793                 spin_lock(&BTRFS_I(inode)->lock);
1794                 BTRFS_I(inode)->delalloc_bytes += len;
1795                 if (*bits & EXTENT_DEFRAG)
1796                         BTRFS_I(inode)->defrag_bytes += len;
1797                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1798                                          &BTRFS_I(inode)->runtime_flags))
1799                         btrfs_add_delalloc_inodes(root, inode);
1800                 spin_unlock(&BTRFS_I(inode)->lock);
1801         }
1802 
1803         if (!(state->state & EXTENT_DELALLOC_NEW) &&
1804             (*bits & EXTENT_DELALLOC_NEW)) {
1805                 spin_lock(&BTRFS_I(inode)->lock);
1806                 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1807                         state->start;
1808                 spin_unlock(&BTRFS_I(inode)->lock);
1809         }
1810 }
1811 
1812 /*
1813  * extent_io.c clear_bit_hook, see set_bit_hook for why
1814  */
1815 static void btrfs_clear_bit_hook(void *private_data,
1816                                  struct extent_state *state,
1817                                  unsigned *bits)
1818 {
1819         struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
1820         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1821         u64 len = state->end + 1 - state->start;
1822         u32 num_extents = count_max_extents(len);
1823 
1824         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1825                 spin_lock(&inode->lock);
1826                 inode->defrag_bytes -= len;
1827                 spin_unlock(&inode->lock);
1828         }
1829 
1830         /*
1831          * set_bit and clear bit hooks normally require _irqsave/restore
1832          * but in this case, we are only testing for the DELALLOC
1833          * bit, which is only set or cleared with irqs on
1834          */
1835         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1836                 struct btrfs_root *root = inode->root;
1837                 bool do_list = !btrfs_is_free_space_inode(inode);
1838 
1839                 spin_lock(&inode->lock);
1840                 btrfs_mod_outstanding_extents(inode, -num_extents);
1841                 spin_unlock(&inode->lock);
1842 
1843                 /*
1844                  * We don't reserve metadata space for space cache inodes so we
1845                  * don't need to call btrfs_delalloc_release_metadata if there is an
1846                  * error.
1847                  */
1848                 if (*bits & EXTENT_CLEAR_META_RESV &&
1849                     root != fs_info->tree_root)
1850                         btrfs_delalloc_release_metadata(inode, len, false);
1851 
1852                 /* For sanity tests. */
1853                 if (btrfs_is_testing(fs_info))
1854                         return;
1855 
1856                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1857                     do_list && !(state->state & EXTENT_NORESERVE) &&
1858                     (*bits & EXTENT_CLEAR_DATA_RESV))
1859                         btrfs_free_reserved_data_space_noquota(
1860                                         &inode->vfs_inode,
1861                                         state->start, len);
1862 
1863                 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1864                                          fs_info->delalloc_batch);
1865                 spin_lock(&inode->lock);
1866                 inode->delalloc_bytes -= len;
1867                 if (do_list && inode->delalloc_bytes == 0 &&
1868                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1869                                         &inode->runtime_flags))
1870                         btrfs_del_delalloc_inode(root, inode);
1871                 spin_unlock(&inode->lock);
1872         }
1873 
1874         if ((state->state & EXTENT_DELALLOC_NEW) &&
1875             (*bits & EXTENT_DELALLOC_NEW)) {
1876                 spin_lock(&inode->lock);
1877                 ASSERT(inode->new_delalloc_bytes >= len);
1878                 inode->new_delalloc_bytes -= len;
1879                 spin_unlock(&inode->lock);
1880         }
1881 }
1882 
1883 /*
1884  * Merge bio hook: this must check the chunk tree to make sure we don't create
1885  * bios that span stripes or chunks.
1886  *
1887  * Return 1 if the page cannot be merged into the bio,
1888  * return 0 if the page can be merged into the bio,
1889  * return an error otherwise.
1890  */
1891 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1892                          size_t size, struct bio *bio,
1893                          unsigned long bio_flags)
1894 {
1895         struct inode *inode = page->mapping->host;
1896         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
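        /* bi_sector is in 512-byte units; shifting by 9 converts to bytes */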
1897         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1898         u64 length = 0;
1899         u64 map_length;
1900         int ret;
1901 
1902         if (bio_flags & EXTENT_BIO_COMPRESSED)
1903                 return 0;
1904 
1905         length = bio->bi_iter.bi_size;
1906         map_length = length;
1907         ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1908                               NULL, 0);
1909         if (ret < 0)
1910                 return ret;
1911         if (map_length < length + size)
1912                 return 1;
1913         return 0;
1914 }
1915 
1916 /*
1917  * In order to insert checksums into the metadata in large chunks,
1918  * we wait until bio submission time.  All the pages in the bio are
1919  * checksummed and sums are attached onto the ordered extent record.
1920  *
1921  * At IO completion time the csums attached on the ordered extent record
1922  * are inserted into the btree.
1923  */
1924 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
1925                                     u64 bio_offset)
1926 {
1927         struct inode *inode = private_data;
1928         blk_status_t ret = 0;
1929 
1930         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1931         BUG_ON(ret); /* -ENOMEM */
1932         return 0;
1933 }
1934 
1935 /*
1936  * The bio submission half of the async checksum path.  By the time this
1937  * runs, btrfs_submit_bio_start() has already checksummed all the pages
1938  * in the bio and attached the sums onto the ordered extent record.
1939  *
1940  * All that is left here is to map the bio to the underlying devices
1941  * and submit it.
1942  */
1943 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
1944                           int mirror_num)
1945 {
1946         struct inode *inode = private_data;
1947         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1948         blk_status_t ret;
1949 
1950         ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
1951         if (ret) {
1952                 bio->bi_status = ret;
1953                 bio_endio(bio);
1954         }
1955         return ret;
1956 }
1957 
1958 /*
1959  * extent_io.c submission hook. This does the right thing for csum calculation
1960  * on write, or reading the csums from the tree before a read.
1961  *
1962  * Rules about async/sync submit,
1963  * a) read:                             sync submit
1964  *
1965  * b) write without checksum:           sync submit
1966  *
1967  * c) write with checksum:
1968  *    c-1) if bio is issued by fsync:   sync submit
1969  *         (sync_writers != 0)
1970  *
1971  *    c-2) if root is reloc root:       sync submit
1972  *         (only in case of buffered IO)
1973  *
1974  *    c-3) otherwise:                   async submit
1975  */
1976 static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
1977                                  int mirror_num, unsigned long bio_flags,
1978                                  u64 bio_offset)
1979 {
1980         struct inode *inode = private_data;
1981         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1982         struct btrfs_root *root = BTRFS_I(inode)->root;
1983         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1984         blk_status_t ret = 0;
1985         int skip_sum;
1986         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1987 
1988         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1989 
1990         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
1991                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1992 
1993         if (bio_op(bio) != REQ_OP_WRITE) {
1994                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
1995                 if (ret)
1996                         goto out;
1997 
1998                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1999                         ret = btrfs_submit_compressed_read(inode, bio,
2000                                                            mirror_num,
2001                                                            bio_flags);
2002                         goto out;
2003                 } else if (!skip_sum) {
2004                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2005                         if (ret)
2006                                 goto out;
2007                 }
2008                 goto mapit;
2009         } else if (async && !skip_sum) {
2010                 /* csum items have already been cloned */
2011                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2012                         goto mapit;
2013                 /* we're doing a write, do the async checksumming */
2014                 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2015                                           bio_offset, inode,
2016                                           btrfs_submit_bio_start);
2017                 goto out;
2018         } else if (!skip_sum) {
2019                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2020                 if (ret)
2021                         goto out;
2022         }
2023 
2024 mapit:
2025         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2026 
2027 out:
2028         if (ret) {
2029                 bio->bi_status = ret;
2030                 bio_endio(bio);
2031         }
2032         return ret;
2033 }
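/*
 * The rules above, condensed (sketch): reads look up csums before being
 * mapped (unless NODATASUM or compressed, which handles its own csums);
 * NODATASUM writes do no csum work; fsync writers (sync_writers != 0)
 * checksum synchronously; reloc-root writes go straight to mapping since
 * their csum items were already cloned; every other checksummed write
 * defers csum generation to the btrfs_wq_submit_bio() worker so that
 * submission does not block on hashing.
 */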
2034 
2035 /*
2036  * given a list of ordered sums, record them in the inode.  This happens
2037  * at IO completion time based on sums calculated at bio submission time.
2038  */
2039 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2040                              struct inode *inode, struct list_head *list)
2041 {
2042         struct btrfs_ordered_sum *sum;
2043         int ret;
2044 
2045         list_for_each_entry(sum, list, list) {
2046                 trans->adding_csums = true;
2047                 ret = btrfs_csum_file_blocks(trans,
2048                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
2049                 trans->adding_csums = false;
2050                 if (ret)
2051                         return ret;
2052         }
2053         return 0;
2054 }
2055 
2056 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2057                               unsigned int extra_bits,
2058                               struct extent_state **cached_state, int dedupe)
2059 {
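        /*
         * end is an inclusive offset, so for whole-page ranges it is one
         * byte short of a page boundary; a page-aligned end here suggests
         * the caller passed an exclusive end by mistake.
         */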
2060         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
2061         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2062                                    extra_bits, cached_state);
2063 }
2064 
2065 /* see btrfs_writepage_start_hook for details on why this is required */
2066 struct btrfs_writepage_fixup {
2067         struct page *page;
2068         struct btrfs_work work;
2069 };
2070 
2071 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2072 {
2073         struct btrfs_writepage_fixup *fixup;
2074         struct btrfs_ordered_extent *ordered;
2075         struct extent_state *cached_state = NULL;
2076         struct extent_changeset *data_reserved = NULL;
2077         struct page *page;
2078         struct inode *inode;
2079         u64 page_start;
2080         u64 page_end;
2081         int ret;
2082 
2083         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2084         page = fixup->page;
2085 again:
2086         lock_page(page);
2087         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2088                 ClearPageChecked(page);
2089                 goto out_page;
2090         }
2091 
2092         inode = page->mapping->host;
2093         page_start = page_offset(page);
2094         page_end = page_offset(page) + PAGE_SIZE - 1;
2095 
2096         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2097                          &cached_state);
2098 
2099         /* already ordered? We're done */
2100         if (PagePrivate2(page))
2101                 goto out;
2102 
2103         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2104                                         PAGE_SIZE);
2105         if (ordered) {
2106                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2107                                      page_end, &cached_state);
2108                 unlock_page(page);
2109                 btrfs_start_ordered_extent(inode, ordered, 1);
2110                 btrfs_put_ordered_extent(ordered);
2111                 goto again;
2112         }
2113 
2114         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2115                                            PAGE_SIZE);
2116         if (ret) {
2117                 mapping_set_error(page->mapping, ret);
2118                 end_extent_writepage(page, ret, page_start, page_end);
2119                 ClearPageChecked(page);
2120                 goto out;
2121         }
2122 
2123         ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2124                                         &cached_state, 0);
2125         if (ret) {
2126                 mapping_set_error(page->mapping, ret);
2127                 end_extent_writepage(page, ret, page_start, page_end);
2128                 ClearPageChecked(page);
2129                 goto out;
2130         }
2131 
2132         ClearPageChecked(page);
2133         set_page_dirty(page);
2134         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
2135 out:
2136         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2137                              &cached_state);
2138 out_page:
2139         unlock_page(page);
2140         put_page(page);
2141         kfree(fixup);
2142         extent_changeset_free(data_reserved);
2143 }
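/*
 * The worker above follows a lock/recheck/retry shape (sketch):
 *
 *      again:
 *              lock the page, then the extent range;
 *              if Private2 is set, an ordered extent already covers it: done;
 *              if another ordered extent overlaps, unlock everything, wait
 *              for it to complete and goto again;
 *              otherwise reserve space and re-mark the range delalloc.
 */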
2144 
2145 /*
2146  * There are a few paths in the higher layers of the kernel that directly
2147  * set the page dirty bit without asking the filesystem if it is a
2148  * good idea.  This causes problems because we want to make sure COW
2149  * properly happens and the data=ordered rules are followed.
2150  *
2151  * In our case any range that doesn't have the ORDERED bit set
2152  * hasn't been properly set up for IO.  We kick off an async process
2153  * to fix it up.  The async helper will wait for ordered extents, set
2154  * the delalloc bit and make it safe to write the page.
2155  */
2156 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2157 {
2158         struct inode *inode = page->mapping->host;
2159         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2160         struct btrfs_writepage_fixup *fixup;
2161 
2162         /* this page is properly in the ordered list */
2163         if (TestClearPagePrivate2(page))
2164                 return 0;
2165 
2166         if (PageChecked(page))
2167                 return -EAGAIN;
2168 
2169         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2170         if (!fixup)
2171                 return -EAGAIN;
2172 
2173         SetPageChecked(page);
2174         get_page(page);
2175         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2176                         btrfs_writepage_fixup_worker, NULL, NULL);
2177         fixup->page = page;
2178         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2179         return -EBUSY;
2180 }
2181 
2182 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2183                                        struct inode *inode, u64 file_pos,
2184                                        u64 disk_bytenr, u64 disk_num_bytes,
2185                                        u64 num_bytes, u64 ram_bytes,
2186                                        u8 compression, u8 encryption,
2187                                        u16 other_encoding, int extent_type)
2188 {
2189         struct btrfs_root *root = BTRFS_I(inode)->root;
2190         struct btrfs_file_extent_item *fi;
2191         struct btrfs_path *path;
2192         struct extent_buffer *leaf;
2193         struct btrfs_key ins;
2194         u64 qg_released;
2195         int extent_inserted = 0;
2196         int ret;
2197 
2198         path = btrfs_alloc_path();
2199         if (!path)
2200                 return -ENOMEM;
2201 
2202         /*
2203          * we may be replacing one extent in the tree with another.
2204          * The new extent is pinned in the extent map, and we don't want
2205          * to drop it from the cache until it is completely in the btree.
2206          *
2207          * So, tell btrfs_drop_extents to leave this extent in the cache.
2208  * The caller is expected to unpin it and allow it to be merged
2209          * with the others.
2210          */
2211         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2212                                    file_pos + num_bytes, NULL, 0,
2213                                    1, sizeof(*fi), &extent_inserted);
2214         if (ret)
2215                 goto out;
2216 
2217         if (!extent_inserted) {
2218                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2219                 ins.offset = file_pos;
2220                 ins.type = BTRFS_EXTENT_DATA_KEY;
2221 
2222                 path->leave_spinning = 1;
2223                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2224                                               sizeof(*fi));
2225                 if (ret)
2226                         goto out;
2227         }
2228         leaf = path->nodes[0];
2229         fi = btrfs_item_ptr(leaf, path->slots[0],
2230                             struct btrfs_file_extent_item);
2231         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2232         btrfs_set_file_extent_type(leaf, fi, extent_type);
2233         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2234         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2235         btrfs_set_file_extent_offset(leaf, fi, 0);
2236         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2237         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2238         btrfs_set_file_extent_compression(leaf, fi, compression);
2239         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2240         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2241 
2242         btrfs_mark_buffer_dirty(leaf);
2243         btrfs_release_path(path);
2244 
2245         inode_add_bytes(inode, num_bytes);
2246 
2247         ins.objectid = disk_bytenr;
2248         ins.offset = disk_num_bytes;
2249         ins.type = BTRFS_EXTENT_ITEM_KEY;
2250 
2251         /*
2252          * Release the reserved range from inode dirty range map, as it is
2253          * already moved into delayed_ref_head
2254          */
2255         ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2256         if (ret < 0)
2257                 goto out;
2258         qg_released = ret;
2259         ret = btrfs_alloc_reserved_file_extent(trans, root,
2260                                                btrfs_ino(BTRFS_I(inode)),
2261                                                file_pos, qg_released, &ins);
2262 out:
2263         btrfs_free_path(path);
2264 
2265         return ret;
2266 }
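/*
 * How the btrfs_file_extent_item fields set above relate (sketch):
 *
 *      disk_bytenr/disk_num_bytes: the on-disk extent that was allocated
 *      offset:    byte offset into that extent where this file's data
 *                 starts (0 here, since the extent was made for this write)
 *      num_bytes: bytes of the extent referenced by the file
 *      ram_bytes: uncompressed size of the data (== num_bytes when the
 *                 extent is not compressed)
 */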
2267 
2268 /* snapshot-aware defrag */
2269 struct sa_defrag_extent_backref {
2270         struct rb_node node;
2271         struct old_sa_defrag_extent *old;
2272         u64 root_id;
2273         u64 inum;
2274         u64 file_pos;
2275         u64 extent_offset;
2276         u64 num_bytes;
2277         u64 generation;
2278 };
2279 
2280 struct old_sa_defrag_extent {
2281         struct list_head list;
2282         struct new_sa_defrag_extent *new;
2283 
2284         u64 extent_offset;
2285         u64 bytenr;
2286         u64 offset;
2287         u64 len;
2288         int count;
2289 };
2290 
2291 struct new_sa_defrag_extent {
2292         struct rb_root root;
2293         struct list_head head;
2294         struct btrfs_path *path;
2295         struct inode *inode;
2296         u64 file_pos;
2297         u64 len;
2298         u64 bytenr;
2299         u64 disk_len;
2300         u8 compress_type;
2301 };
2302 
2303 static int backref_comp(struct sa_defrag_extent_backref *b1,
2304                         struct sa_defrag_extent_backref *b2)
2305 {
2306         if (b1->root_id < b2->root_id)
2307                 return -1;
2308         else if (b1->root_id > b2->root_id)
2309                 return 1;
2310 
2311         if (b1->inum < b2->inum)
2312                 return -1;
2313         else if (b1->inum > b2->inum)
2314                 return 1;
2315 
2316         if (b1->file_pos < b2->file_pos)
2317                 return -1;
2318         else if (b1->file_pos > b2->file_pos)
2319                 return 1;
2320 
2321         /*
2322          * [------------------------------] ===> (a range of space)
2323          *     |<--->|   |<---->| =============> (fs/file tree A)
2324          * |<---------------------------->| ===> (fs/file tree B)
2325          *
2326  * A range of space can refer to two file extents in one tree while
2327  * referring to only one file extent in another tree.
2328  *
2329  * So we may process a disk offset more than once (two extents in A)
2330  * that lands in the same extent (one extent in B), and then insert two
2331  * identical backrefs (both referring to the extent in B).
2332          */
2333         return 0;
2334 }
2335 
2336 static void backref_insert(struct rb_root *root,
2337                            struct sa_defrag_extent_backref *backref)
2338 {
2339         struct rb_node **p = &root->rb_node;
2340         struct rb_node *parent = NULL;
2341         struct sa_defrag_extent_backref *entry;
2342         int ret;
2343 
2344         while (*p) {
2345                 parent = *p;
2346                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2347 
2348                 ret = backref_comp(backref, entry);
2349                 if (ret < 0)
2350                         p = &(*p)->rb_left;
2351                 else
2352                         p = &(*p)->rb_right;
2353         }
2354 
2355         rb_link_node(&backref->node, parent, p);
2356         rb_insert_color(&backref->node, root);
2357 }
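/*
 * Note that backref_comp() can return 0 for two distinct backrefs (see the
 * diagram above it), and equal keys are linked to the right here, so the
 * tree tolerates duplicate entries rather than rejecting them.
 */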
2358 
2359 /*
2360  * Note the backref might have changed; in that case we just return 0.
2361  */
2362 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2363                                        void *ctx)
2364 {
2365         struct btrfs_file_extent_item *extent;
2366         struct old_sa_defrag_extent *old = ctx;
2367         struct new_sa_defrag_extent *new = old->new;
2368         struct btrfs_path *path = new->path;
2369         struct btrfs_key key;
2370         struct btrfs_root *root;
2371         struct sa_defrag_extent_backref *backref;
2372         struct extent_buffer *leaf;
2373         struct inode *inode = new->inode;
2374         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2375         int slot;
2376         int ret;
2377         u64 extent_offset;
2378         u64 num_bytes;
2379 
2380         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2381             inum == btrfs_ino(BTRFS_I(inode)))
2382                 return 0;
2383 
2384         key.objectid = root_id;
2385         key.type = BTRFS_ROOT_ITEM_KEY;
2386         key.offset = (u64)-1;
2387 
2388         root = btrfs_read_fs_root_no_name(fs_info, &key);
2389         if (IS_ERR(root)) {
2390                 if (PTR_ERR(root) == -ENOENT)
2391                         return 0;
2392                 WARN_ON(1);
2393                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2394                          inum, offset, root_id);
2395                 return PTR_ERR(root);
2396         }
2397 
2398         key.objectid = inum;
2399         key.type = BTRFS_EXTENT_DATA_KEY;
2400         if (offset > (u64)-1 << 32)
2401                 key.offset = 0;
2402         else
2403                 key.offset = offset;
2404 
2405         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2406         if (WARN_ON(ret < 0))
2407                 return ret;
2408         ret = 0;
2409 
2410         while (1) {
2411                 cond_resched();
2412 
2413                 leaf = path->nodes[0];
2414                 slot = path->slots[0];
2415 
2416                 if (slot >= btrfs_header_nritems(leaf)) {
2417                         ret = btrfs_next_leaf(root, path);
2418                         if (ret < 0) {
2419                                 goto out;
2420                         } else if (ret > 0) {
2421                                 ret = 0;
2422                                 goto out;
2423                         }
2424                         continue;
2425                 }
2426 
2427                 path->slots[0]++;
2428 
2429                 btrfs_item_key_to_cpu(leaf, &key, slot);
2430 
2431                 if (key.objectid > inum)
2432                         goto out;
2433 
2434                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2435                         continue;
2436 
2437                 extent = btrfs_item_ptr(leaf, slot,
2438                                         struct btrfs_file_extent_item);
2439 
2440                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2441                         continue;
2442 
2443                 /*
2444                  * 'offset' refers to the exact key.offset,
2445                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2446                  * (key.offset - extent_offset).
2447                  */
2448                 if (key.offset != offset)
2449                         continue;
2450 
2451                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2452                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2453 
2454                 if (extent_offset >= old->extent_offset + old->offset +
2455                     old->len || extent_offset + num_bytes <=
2456                     old->extent_offset + old->offset)
2457                         continue;
2458                 break;
2459         }
2460 
2461         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2462         if (!backref) {
2463                 ret = -ENOMEM;
2464                 goto out;
2465         }
2466 
2467         backref->root_id = root_id;
2468         backref->inum = inum;
2469         backref->file_pos = offset;
2470         backref->num_bytes = num_bytes;
2471         backref->extent_offset = extent_offset;
2472         backref->generation = btrfs_file_extent_generation(leaf, extent);
2473         backref->old = old;
2474         backref_insert(&new->root, backref);
2475         old->count++;
2476 out:
2477         btrfs_release_path(path);
2478         WARN_ON(ret);
2479         return ret;
2480 }
2481 
2482 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2483                                    struct new_sa_defrag_extent *new)
2484 {
2485         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2486         struct old_sa_defrag_extent *old, *tmp;
2487         int ret;
2488 
2489         new->path = path;
2490 
2491         list_for_each_entry_safe(old, tmp, &new->head, list) {
2492                 ret = iterate_inodes_from_logical(old->bytenr +
2493                                                   old->extent_offset, fs_info,
2494                                                   path, record_one_backref,
2495                                                   old, false);
2496                 if (ret < 0 && ret != -ENOENT)
2497                         return false;
2498 
2499                 /* no backref to be processed for this extent */
2500                 if (!old->count) {
2501                         list_del(&old->list);
2502                         kfree(old);
2503                 }
2504         }
2505 
2506         if (list_empty(&new->head))
2507                 return false;
2508 
2509         return true;
2510 }
2511 
2512 static int relink_is_mergable(struct extent_buffer *leaf,
2513                               struct btrfs_file_extent_item *fi,
2514                               struct new_sa_defrag_extent *new)
2515 {
2516         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2517                 return 0;
2518 
2519         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2520                 return 0;
2521 
2522         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2523                 return 0;
2524 
2525         if (btrfs_file_extent_encryption(leaf, fi) ||
2526             btrfs_file_extent_other_encoding(leaf, fi))
2527                 return 0;
2528 
2529         return 1;
2530 }
2531 
2532 /*
2533  * Note the backref might have changed, and in this case we just return 0.
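      *
      * Returns 1 when the backref was relinked (the caller then keeps it
      * as a merge candidate for the next backref), 0 when it was skipped,
      * and a negative errno on error.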
2534  */
2535 static noinline int relink_extent_backref(struct btrfs_path *path,
2536                                  struct sa_defrag_extent_backref *prev,
2537                                  struct sa_defrag_extent_backref *backref)
2538 {
2539         struct btrfs_file_extent_item *extent;
2540         struct btrfs_file_extent_item *item;
2541         struct btrfs_ordered_extent *ordered;
2542         struct btrfs_trans_handle *trans;
2543         struct btrfs_root *root;
2544         struct btrfs_key key;
2545         struct extent_buffer *leaf;
2546         struct old_sa_defrag_extent *old = backref->old;
2547         struct new_sa_defrag_extent *new = old->new;
2548         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2549         struct inode *inode;
2550         struct extent_state *cached = NULL;
2551         int ret = 0;
2552         u64 start;
2553         u64 len;
2554         u64 lock_start;
2555         u64 lock_end;
2556         bool merge = false;
2557         int index;
2558 
2559         if (prev && prev->root_id == backref->root_id &&
2560             prev->inum == backref->inum &&
2561             prev->file_pos + prev->num_bytes == backref->file_pos)
2562                 merge = true;
2563 
2564         /* step 1: get root */
2565         key.objectid = backref->root_id;
2566         key.type = BTRFS_ROOT_ITEM_KEY;
2567         key.offset = (u64)-1;
2568 
2569         index = srcu_read_lock(&fs_info->subvol_srcu);
2570 
2571         root = btrfs_read_fs_root_no_name(fs_info, &key);
2572         if (IS_ERR(root)) {
2573                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2574                 if (PTR_ERR(root) == -ENOENT)
2575                         return 0;
2576                 return PTR_ERR(root);
2577         }
2578 
2579         if (btrfs_root_readonly(root)) {
2580                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2581                 return 0;
2582         }
2583 
2584         /* step 2: get inode */
2585         key.objectid = backref->inum;
2586         key.type = BTRFS_INODE_ITEM_KEY;
2587         key.offset = 0;
2588 
2589         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2590         if (IS_ERR(inode)) {
2591                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2592                 return 0;
2593         }
2594 
2595         srcu_read_unlock(&fs_info->subvol_srcu, index);
2596 
2597         /* step 3: relink backref */
2598         lock_start = backref->file_pos;
2599         lock_end = backref->file_pos + backref->num_bytes - 1;
2600         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2601                          &cached);
2602 
2603         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2604         if (ordered) {
2605                 btrfs_put_ordered_extent(ordered);
2606                 goto out_unlock;
2607         }
2608 
2609         trans = btrfs_join_transaction(root);
2610         if (IS_ERR(trans)) {
2611                 ret = PTR_ERR(trans);
2612                 goto out_unlock;
2613         }
2614 
2615         key.objectid = backref->inum;
2616         key.type = BTRFS_EXTENT_DATA_KEY;
2617         key.offset = backref->file_pos;
2618 
2619         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2620         if (ret < 0) {
2621                 goto out_free_path;
2622         } else if (ret > 0) {
2623                 ret = 0;
2624                 goto out_free_path;
2625         }
2626 
2627         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2628                                 struct btrfs_file_extent_item);
2629 
2630         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2631             backref->generation)
2632                 goto out_free_path;
2633 
2634         btrfs_release_path(path);
2635 
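             /*
              * Compute the file range [start, start + len) covered by the
              * intersection of this backref's extent range and the old
              * extent's range.  E.g. with backref->extent_offset == 0,
              * num_bytes == 8K and the old range covering bytes [4K, 12K)
              * of the extent, only [backref->file_pos + 4K,
              * backref->file_pos + 8K) gets relinked.
              */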
2636         start = backref->file_pos;
2637         if (backref->extent_offset < old->extent_offset + old->offset)
2638                 start += old->extent_offset + old->offset -
2639                          backref->extent_offset;
2640 
2641         len = min(backref->extent_offset + backref->num_bytes,
2642                   old->extent_offset + old->offset + old->len);
2643         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2644 
2645         ret = btrfs_drop_extents(trans, root, inode, start,
2646                                  start + len, 1);
2647         if (ret)
2648                 goto out_free_path;
2649 again:
2650         key.objectid = btrfs_ino(BTRFS_I(inode));
2651         key.type = BTRFS_EXTENT_DATA_KEY;
2652         key.offset = start;
2653 
2654         path->leave_spinning = 1;
2655         if (merge) {
2656                 struct btrfs_file_extent_item *fi;
2657                 u64 extent_len;
2658                 struct btrfs_key found_key;
2659 
2660                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2661                 if (ret < 0)
2662                         goto out_free_path;
2663 
2664                 path->slots[0]--;
2665                 leaf = path->nodes[0];
2666                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2667 
2668                 fi = btrfs_item_ptr(leaf, path->slots[0],
2669                                     struct btrfs_file_extent_item);
2670                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2671 
2672                 if (extent_len + found_key.offset == start &&
2673                     relink_is_mergable(leaf, fi, new)) {
2674                         btrfs_set_file_extent_num_bytes(leaf, fi,
2675                                                         extent_len + len);
2676                         btrfs_mark_buffer_dirty(leaf);
2677                         inode_add_bytes(inode, len);
2678 
2679                         ret = 1;
2680                         goto out_free_path;
2681                 } else {
2682                         merge = false;
2683                         btrfs_release_path(path);
2684                         goto again;
2685                 }
2686         }
2687 
2688         ret = btrfs_insert_empty_item(trans, root, path, &key,
2689                                         sizeof(*extent));
2690         if (ret) {
2691                 btrfs_abort_transaction(trans, ret);
2692                 goto out_free_path;
2693         }
2694 
2695         leaf = path->nodes[0];
2696         item = btrfs_item_ptr(leaf, path->slots[0],
2697                                 struct btrfs_file_extent_item);
2698         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2699         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2700         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2701         btrfs_set_file_extent_num_bytes(leaf, item, len);
2702         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2703         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2704         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2705         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2706         btrfs_set_file_extent_encryption(leaf, item, 0);
2707         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2708 
2709         btrfs_mark_buffer_dirty(leaf);
2710         inode_add_bytes(inode, len);
2711         btrfs_release_path(path);
2712 
2713         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2714                         new->disk_len, 0,
2715                         backref->root_id, backref->inum,
2716                         new->file_pos); /* start - extent_offset */
2717         if (ret) {
2718                 btrfs_abort_transaction(trans, ret);
2719                 goto out_free_path;
2720         }
2721 
2722         ret = 1;
2723 out_free_path:
2724         btrfs_release_path(path);
2725         path->leave_spinning = 0;
2726         btrfs_end_transaction(trans);
2727 out_unlock:
2728         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2729                              &cached);
2730         iput(inode);
2731         return ret;
2732 }
2733 
2734 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2735 {
2736         struct old_sa_defrag_extent *old, *tmp;
2737 
2738         if (!new)
2739                 return;
2740 
2741         list_for_each_entry_safe(old, tmp, &new->head, list) {
2742                 kfree(old);
2743         }
2744         kfree(new);
2745 }
2746 
2747 static void relink_file_extents(struct new_sa_defrag_extent *new)
2748 {
2749         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2750         struct btrfs_path *path;
2751         struct sa_defrag_extent_backref *backref;
2752         struct sa_defrag_extent_backref *prev = NULL;
2753         struct rb_node *node;
2754         int ret;
2755 
2756         path = btrfs_alloc_path();
2757         if (!path)
2758                 return;
2759 
2760         if (!record_extent_backrefs(path, new)) {
2761                 btrfs_free_path(path);
2762                 goto out;
2763         }
2764         btrfs_release_path(path);
2765 
2766         while (1) {
2767                 node = rb_first(&new->root);
2768                 if (!node)
2769                         break;
2770                 rb_erase(node, &new->root);
2771 
2772                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2773 
2774                 ret = relink_extent_backref(path, prev, backref);
2775                 WARN_ON(ret < 0);
2776 
2777                 kfree(prev);
2778 
2779                 if (ret == 1)
2780                         prev = backref;
2781                 else
2782                         prev = NULL;
2783                 cond_resched();
2784         }
2785         kfree(prev);
2786 
2787         btrfs_free_path(path);
2788 out:
2789         free_sa_defrag_extent(new);
2790 
2791         atomic_dec(&fs_info->defrag_running);
2792         wake_up(&fs_info->transaction_wait);
2793 }
2794 
2795 static struct new_sa_defrag_extent *
2796 record_old_file_extents(struct inode *inode,
2797                         struct btrfs_ordered_extent *ordered)
2798 {
2799         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2800         struct btrfs_root *root = BTRFS_I(inode)->root;
2801         struct btrfs_path *path;
2802         struct btrfs_key key;
2803         struct old_sa_defrag_extent *old;
2804         struct new_sa_defrag_extent *new;
2805         int ret;
2806 
2807         new = kmalloc(sizeof(*new), GFP_NOFS);
2808         if (!new)
2809                 return NULL;
2810 
2811         new->inode = inode;
2812         new->file_pos = ordered->file_offset;
2813         new->len = ordered->len;
2814         new->bytenr = ordered->start;
2815         new->disk_len = ordered->disk_len;
2816         new->compress_type = ordered->compress_type;
2817         new->root = RB_ROOT;
2818         INIT_LIST_HEAD(&new->head);
2819 
2820         path = btrfs_alloc_path();
2821         if (!path)
2822                 goto out_kfree;
2823 
2824         key.objectid = btrfs_ino(BTRFS_I(inode));
2825         key.type = BTRFS_EXTENT_DATA_KEY;
2826         key.offset = new->file_pos;
2827 
2828         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2829         if (ret < 0)
2830                 goto out_free_path;
2831         if (ret > 0 && path->slots[0] > 0)
2832                 path->slots[0]--;
2833 
2834         /* find out all the old extents for the file range */
2835         while (1) {
2836                 struct btrfs_file_extent_item *extent;
2837                 struct extent_buffer *l;
2838                 int slot;
2839                 u64 num_bytes;
2840                 u64 offset;
2841                 u64 end;
2842                 u64 disk_bytenr;
2843                 u64 extent_offset;
2844 
2845                 l = path->nodes[0];
2846                 slot = path->slots[0];
2847 
2848                 if (slot >= btrfs_header_nritems(l)) {
2849                         ret = btrfs_next_leaf(root, path);
2850                         if (ret < 0)
2851                                 goto out_free_path;
2852                         else if (ret > 0)
2853                                 break;
2854                         continue;
2855                 }
2856 
2857                 btrfs_item_key_to_cpu(l, &key, slot);
2858 
2859                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2860                         break;
2861                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2862                         break;
2863                 if (key.offset >= new->file_pos + new->len)
2864                         break;
2865 
2866                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2867 
2868                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2869                 if (key.offset + num_bytes < new->file_pos)
2870                         goto next;
2871 
2872                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2873                 if (!disk_bytenr)
2874                         goto next;
2875 
2876                 extent_offset = btrfs_file_extent_offset(l, extent);
2877 
2878                 old = kmalloc(sizeof(*old), GFP_NOFS);
2879                 if (!old)
2880                         goto out_free_path;
2881 
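                     /*
                      * Clamp the old extent to the part that overlaps the
                      * range being defragged,
                      * [new->file_pos, new->file_pos + new->len).
                      */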
2882                 offset = max(new->file_pos, key.offset);
2883                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2884 
2885                 old->bytenr = disk_bytenr;
2886                 old->extent_offset = extent_offset;
2887                 old->offset = offset - key.offset;
2888                 old->len = end - offset;
2889                 old->new = new;
2890                 old->count = 0;
2891                 list_add_tail(&old->list, &new->head);
2892 next:
2893                 path->slots[0]++;
2894                 cond_resched();
2895         }
2896 
2897         btrfs_free_path(path);
2898         atomic_inc(&fs_info->defrag_running);
2899 
2900         return new;
2901 
2902 out_free_path:
2903         btrfs_free_path(path);
2904 out_kfree:
2905         free_sa_defrag_extent(new);
2906         return NULL;
2907 }
2908 
2909 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2910                                          u64 start, u64 len)
2911 {
2912         struct btrfs_block_group_cache *cache;
2913 
2914         cache = btrfs_lookup_block_group(fs_info, start);
2915         ASSERT(cache);
2916 
2917         spin_lock(&cache->lock);
2918         cache->delalloc_bytes -= len;
2919         spin_unlock(&cache->lock);
2920 
2921         btrfs_put_block_group(cache);
2922 }
2923 
2924 /* As ordered data IO finishes, this gets called so we can finish
2925  * an ordered extent if the range of bytes in the file it covers is
2926  * fully written.
2927  */
2928 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2929 {
2930         struct inode *inode = ordered_extent->inode;
2931         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2932         struct btrfs_root *root = BTRFS_I(inode)->root;
2933         struct btrfs_trans_handle *trans = NULL;
2934         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2935         struct extent_state *cached_state = NULL;
2936         struct new_sa_defrag_extent *new = NULL;
2937         int compress_type = 0;
2938         int ret = 0;
2939         u64 logical_len = ordered_extent->len;
2940         bool nolock;
2941         bool truncated = false;
2942         bool range_locked = false;
2943         bool clear_new_delalloc_bytes = false;
2944         bool clear_reserved_extent = true;
2945 
2946         if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2947             !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2948             !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2949                 clear_new_delalloc_bytes = true;
2950 
2951         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
2952 
2953         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2954                 ret = -EIO;
2955                 goto out;
2956         }
2957 
2958         btrfs_free_io_failure_record(BTRFS_I(inode),
2959                         ordered_extent->file_offset,
2960                         ordered_extent->file_offset +
2961                         ordered_extent->len - 1);
2962 
2963         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2964                 truncated = true;
2965                 logical_len = ordered_extent->truncated_len;
2966                 /* Truncated the entire extent, don't bother adding */
2967                 if (!logical_len)
2968                         goto out;
2969         }
2970 
2971         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2972                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2973 
2974                 /*
2975                  * For the mwrite (mmap + memset to write) case, we still
2976                  * reserve space for the NOCOW range.
2977                  * As NOCOW won't cause a new delayed ref, just free the space.
2978                  */
2979                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
2980                                        ordered_extent->len);
2981                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2982                 if (nolock)
2983                         trans = btrfs_join_transaction_nolock(root);
2984                 else
2985                         trans = btrfs_join_transaction(root);
2986                 if (IS_ERR(trans)) {
2987                         ret = PTR_ERR(trans);
2988                         trans = NULL;
2989                         goto out;
2990                 }
2991                 trans->block_rsv = &BTRFS_I(inode)->block_rsv;
2992                 ret = btrfs_update_inode_fallback(trans, root, inode);
2993                 if (ret) /* -ENOMEM or corruption */
2994                         btrfs_abort_transaction(trans, ret);
2995                 goto out;
2996         }
2997 
2998         range_locked = true;
2999         lock_extent_bits(io_tree, ordered_extent->file_offset,
3000                          ordered_extent->file_offset + ordered_extent->len - 1,
3001                          &cached_state);
3002 
3003         ret = test_range_bit(io_tree, ordered_extent->file_offset,
3004                         ordered_extent->file_offset + ordered_extent->len - 1,
3005                         EXTENT_DEFRAG, 0, cached_state);
3006         if (ret) {
3007                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
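                     /*
                      * Snapshot-aware defrag is intentionally disabled here
                      * (note the "0 &&"): the relink machinery above is kept
                      * but never triggered.
                      */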
3008                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3009                         /* the inode is shared */
3010                         new = record_old_file_extents(inode, ordered_extent);
3011 
3012                 clear_extent_bit(io_tree, ordered_extent->file_offset,
3013                         ordered_extent->file_offset + ordered_extent->len - 1,
3014                         EXTENT_DEFRAG, 0, 0, &cached_state);
3015         }
3016 
3017         if (nolock)
3018                 trans = btrfs_join_transaction_nolock(root);
3019         else
3020                 trans = btrfs_join_transaction(root);
3021         if (IS_ERR(trans)) {
3022                 ret = PTR_ERR(trans);
3023                 trans = NULL;
3024                 goto out;
3025         }
3026 
3027         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3028 
3029         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3030                 compress_type = ordered_extent->compress_type;
3031         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3032                 BUG_ON(compress_type);
3033                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3034                                        ordered_extent->len);
3035                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3036                                                 ordered_extent->file_offset,
3037                                                 ordered_extent->file_offset +
3038                                                 logical_len);
3039         } else {
3040                 BUG_ON(root == fs_info->tree_root);
3041                 ret = insert_reserved_file_extent(trans, inode,
3042                                                 ordered_extent->file_offset,
3043                                                 ordered_extent->start,
3044                                                 ordered_extent->disk_len,
3045                                                 logical_len, logical_len,
3046                                                 compress_type, 0, 0,
3047                                                 BTRFS_FILE_EXTENT_REG);
3048                 if (!ret) {
3049                         clear_reserved_extent = false;
3050                         btrfs_release_delalloc_bytes(fs_info,
3051                                                      ordered_extent->start,
3052                                                      ordered_extent->disk_len);
3053                 }
3054         }
3055         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3056                            ordered_extent->file_offset, ordered_extent->len,
3057                            trans->transid);
3058         if (ret < 0) {
3059                 btrfs_abort_transaction(trans, ret);
3060                 goto out;
3061         }
3062 
3063         ret = add_pending_csums(trans, inode, &ordered_extent->list);
3064         if (ret) {
3065                 btrfs_abort_transaction(trans, ret);
3066                 goto out;
3067         }
3068 
3069         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3070         ret = btrfs_update_inode_fallback(trans, root, inode);
3071         if (ret) { /* -ENOMEM or corruption */
3072                 btrfs_abort_transaction(trans, ret);
3073                 goto out;
3074         }
3075         ret = 0;
3076 out:
3077         if (range_locked || clear_new_delalloc_bytes) {
3078                 unsigned int clear_bits = 0;
3079 
3080                 if (range_locked)
3081                         clear_bits |= EXTENT_LOCKED;
3082                 if (clear_new_delalloc_bytes)
3083                         clear_bits |= EXTENT_DELALLOC_NEW;
3084                 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3085                                  ordered_extent->file_offset,
3086                                  ordered_extent->file_offset +
3087                                  ordered_extent->len - 1,
3088                                  clear_bits,
3089                                  (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3090                                  0, &cached_state);
3091         }
3092 
3093         if (trans)
3094                 btrfs_end_transaction(trans);
3095 
3096         if (ret || truncated) {
3097                 u64 start, end;
3098 
3099                 if (truncated)
3100                         start = ordered_extent->file_offset + logical_len;
3101                 else
3102                         start = ordered_extent->file_offset;
3103                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3104                 clear_extent_uptodate(io_tree, start, end, NULL);
3105 
3106                 /* Drop the cache for the part of the extent we didn't write. */
3107                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3108 
3109                 /*
3110                  * If the ordered extent had an IOERR or something else went
3111                  * wrong we need to return the space for this ordered extent
3112                  * back to the allocator.  We only free the extent in the
3113                  * truncated case if we didn't write out the extent at all.
3114                  *
3115                  * If we made it past insert_reserved_file_extent before we
3116                  * errored out then we don't need to do this as the accounting
3117                  * has already been done.
3118                  */
3119                 if ((ret || !logical_len) &&
3120                     clear_reserved_extent &&
3121                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3122                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3123                         btrfs_free_reserved_extent(fs_info,
3124                                                    ordered_extent->start,
3125                                                    ordered_extent->disk_len, 1);
3126         }
3127 
3128 
3129         /*
3130          * This needs to be done to make sure anybody waiting knows we are done
3131          * updating everything for this ordered extent.
3132          */
3133         btrfs_remove_ordered_extent(inode, ordered_extent);
3134 
3135         /* for snapshot-aware defrag */
3136         if (new) {
3137                 if (ret) {
3138                         free_sa_defrag_extent(new);
3139                         atomic_dec(&fs_info->defrag_running);
3140                 } else {
3141                         relink_file_extents(new);
3142                 }
3143         }
3144 
3145         /* once for us */
3146         btrfs_put_ordered_extent(ordered_extent);
3147         /* once for the tree */
3148         btrfs_put_ordered_extent(ordered_extent);
3149 
3150         return ret;
3151 }
3152 
3153 static void finish_ordered_fn(struct btrfs_work *work)
3154 {
3155         struct btrfs_ordered_extent *ordered_extent;
3156         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3157         btrfs_finish_ordered_io(ordered_extent);
3158 }
3159 
3160 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3161                                 struct extent_state *state, int uptodate)
3162 {
3163         struct inode *inode = page->mapping->host;
3164         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3165         struct btrfs_ordered_extent *ordered_extent = NULL;
3166         struct btrfs_workqueue *wq;
3167         btrfs_work_func_t func;
3168 
3169         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3170 
3171         ClearPagePrivate2(page);
3172         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3173                                             end - start + 1, uptodate))
3174                 return;
3175 
3176         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
3177                 wq = fs_info->endio_freespace_worker;
3178                 func = btrfs_freespace_write_helper;
3179         } else {
3180                 wq = fs_info->endio_write_workers;
3181                 func = btrfs_endio_write_helper;
3182         }
3183 
3184         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3185                         NULL);
3186         btrfs_queue_work(wq, &ordered_extent->work);
3187 }
3188 
3189 static int __readpage_endio_check(struct inode *inode,
3190                                   struct btrfs_io_bio *io_bio,
3191                                   int icsum, struct page *page,
3192                                   int pgoff, u64 start, size_t len)
3193 {
3194         char *kaddr;
3195         u32 csum_expected;
3196         u32 csum = ~(u32)0;
3197 
3198         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3199 
3200         kaddr = kmap_atomic(page);
3201         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3202         btrfs_csum_final(csum, (u8 *)&csum);
3203         if (csum != csum_expected)
3204                 goto zeroit;
3205 
3206         kunmap_atomic(kaddr);
3207         return 0;
3208 zeroit:
3209         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3210                                     io_bio->mirror_num);
3211         memset(kaddr + pgoff, 1, len);
3212         flush_dcache_page(page);
3213         kunmap_atomic(kaddr);
3214         return -EIO;
3215 }
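     /*
      * A minimal, self-contained user-space sketch of the verify-then-poison
      * pattern used by __readpage_endio_check() above.  The kernel computes
      * crc32c via btrfs_csum_data(); a trivial stand-in checksum is used here
      * only so the sketch compiles on its own.
      */
     #if 0	/* illustration only, not part of the kernel build */
     #include <errno.h>
     #include <stdint.h>
     #include <string.h>

     static uint32_t toy_csum(const unsigned char *buf, size_t len)
     {
             uint32_t csum = 5381;
             size_t i;

             for (i = 0; i < len; i++)
                     csum = csum * 33 + buf[i];	/* djb2-style mix */
             return csum;
     }

     static int verify_block(unsigned char *buf, size_t len, uint32_t expected)
     {
             if (toy_csum(buf, len) == expected)
                     return 0;
             /* poison the range so stale contents are never exposed */
             memset(buf, 1, len);
             return -EIO;
     }
     #endif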
3216 
3217 /*
3218  * when reads are done, we need to check csums to verify the data is correct.
3219  * If there's a match, we allow the bio to finish.  If not, the code in
3220  * extent_io.c will try to find good copies for us.
3221  */
3222 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3223                                       u64 phy_offset, struct page *page,
3224                                       u64 start, u64 end, int mirror)
3225 {
3226         size_t offset = start - page_offset(page);
3227         struct inode *inode = page->mapping->host;
3228         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3229         struct btrfs_root *root = BTRFS_I(inode)->root;
3230 
3231         if (PageChecked(page)) {
3232                 ClearPageChecked(page);
3233                 return 0;
3234         }
3235 
3236         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3237                 return 0;
3238 
3239         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3240             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3241                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3242                 return 0;
3243         }
3244 
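             /*
              * phy_offset is the byte offset of this range within the bio;
              * shifting by the block size bits turns it into an index into
              * the per-block checksum array io_bio->csum.
              */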
3245         phy_offset >>= inode->i_sb->s_blocksize_bits;
3246         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3247                                       start, (size_t)(end - start + 1));
3248 }
3249 
3250 /*
3251  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3252  *
3253  * @inode: The inode we want to perform iput on
3254  *
3255  * This function uses the generic vfs_inode::i_count to decide whether we
3256  * should just decrement it (in case it's > 1) or, if this is the last iput,
3257  * link the inode into the delayed iput machinery. Delayed iputs are processed
3258  * at transaction commit time, on superblock commit and by the cleaner kthread.
3259  */
3260 void btrfs_add_delayed_iput(struct inode *inode)
3261 {
3262         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3263         struct btrfs_inode *binode = BTRFS_I(inode);
3264 
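             /*
              * Drop one reference unless i_count is exactly 1.  If we were
              * not the last holder there is nothing to defer; otherwise keep
              * the last reference and queue the inode for a delayed iput.
              */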
3265         if (atomic_add_unless(&inode->i_count, -1, 1))
3266                 return;
3267 
3268         spin_lock(&fs_info->delayed_iput_lock);
3269         ASSERT(list_empty(&binode->delayed_iput));
3270         list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3271         spin_unlock(&fs_info->delayed_iput_lock);
3272 }
3273 
3274 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3275 {
3276 
3277         spin_lock(&fs_info->delayed_iput_lock);
3278         while (!list_empty(&fs_info->delayed_iputs)) {
3279                 struct btrfs_inode *inode;
3280 
3281                 inode = list_first_entry(&fs_info->delayed_iputs,
3282                                 struct btrfs_inode, delayed_iput);
3283                 list_del_init(&inode->delayed_iput);
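                     /*
                      * Drop the lock for the actual iput: a final iput can
                      * trigger inode eviction, which may block and must not
                      * run under the spinlock.
                      */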
3284                 spin_unlock(&fs_info->delayed_iput_lock);
3285                 iput(&inode->vfs_inode);
3286                 spin_lock(&fs_info->delayed_iput_lock);
3287         }
3288         spin_unlock(&fs_info->delayed_iput_lock);
3289 }
3290 
3291 /*
3292  * This creates an orphan entry for the given inode in case something goes wrong
3293  * in the middle of an unlink.
3294  */
3295 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3296                      struct btrfs_inode *inode)
3297 {
3298         int ret;
3299 
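             /*
              * An orphan item for this inode may already exist; treat
              * -EEXIST as success.
              */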
3300         ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3301         if (ret && ret != -EEXIST) {
3302                 btrfs_abort_transaction(trans, ret);
3303                 return ret;
3304         }
3305 
3306         return 0;
3307 }
3308 
3309 /*
3310  * We have done the delete so we can go ahead and remove the orphan item for
3311  * this particular inode.
3312  */
3313 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3314                             struct btrfs_inode *inode)
3315 {
3316         return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3317 }
3318 
3319 /*
3320  * this cleans up any orphans that may be left on the list from the last use
3321  * of this root.
3322  */
3323 int btrfs_orphan_cleanup(struct btrfs_root *root)
3324 {
3325         struct btrfs_fs_info *fs_info = root->fs_info;
3326         struct btrfs_path *path;
3327         struct extent_buffer *leaf;
3328         struct btrfs_key key, found_key;
3329         struct btrfs_trans_handle *trans;
3330         struct inode *inode;
3331         u64 last_objectid = 0;
3332         int ret = 0, nr_unlink = 0;
3333 
3334         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3335                 return 0;
3336 
3337         path = btrfs_alloc_path();
3338         if (!path) {
3339                 ret = -ENOMEM;
3340                 goto out;
3341         }
3342         path->reada = READA_BACK;
3343 
3344         key.objectid = BTRFS_ORPHAN_OBJECTID;
3345         key.type = BTRFS_ORPHAN_ITEM_KEY;
3346         key.offset = (u64)-1;
3347 
3348         while (1) {
3349                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3350                 if (ret < 0)
3351                         goto out;
3352 
3353                 /*
3354                  * ret == 0 means we found what we were searching for, which
3355                  * is weird but possible, so only screw with the path if we
3356                  * didn't find the key, and see if we have stuff that matches
3357                  */
3358                 if (ret > 0) {
3359                         ret = 0;
3360                         if (path->slots[0] == 0)
3361                                 break;
3362                         path->slots[0]--;
3363                 }
3364 
3365                 /* pull out the item */
3366                 leaf = path->nodes[0];
3367                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3368 
3369                 /* make sure the item matches what we want */
3370                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3371                         break;
3372                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3373                         break;
3374 
3375                 /* release the path since we're done with it */
3376                 btrfs_release_path(path);
3377 
3378                 /*
3379                  * this is basically btrfs_lookup, without the crossing-root
3380                  * part.  we store the inode number in the offset field of
3381                  * the orphan item.
3382                  */
3383 
3384                 if (found_key.offset == last_objectid) {
3385                         btrfs_err(fs_info,
3386                                   "Error removing orphan entry, stopping orphan cleanup");
3387                         ret = -EINVAL;
3388                         goto out;
3389                 }
3390 
3391                 last_objectid = found_key.offset;
3392 
3393                 found_key.objectid = found_key.offset;
3394                 found_key.type = BTRFS_INODE_ITEM_KEY;
3395                 found_key.offset = 0;
3396                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3397                 ret = PTR_ERR_OR_ZERO(inode);
3398                 if (ret && ret != -ENOENT)
3399                         goto out;
3400 
3401                 if (ret == -ENOENT && root == fs_info->tree_root) {
3402                         struct btrfs_root *dead_root;
3403                         struct btrfs_fs_info *fs_info = root->fs_info;
3404                         int is_dead_root = 0;
3405 
3406                         /*
3407                          * this is an orphan in the tree root. Currently these
3408                          * could come from 2 sources:
3409                          *  a) a snapshot deletion in progress
3410                          *  b) a free space cache inode
3411                          * We need to distinguish those two, as the snapshot
3412                          * orphan must not get deleted.
3413                          * find_dead_roots already ran before us, so if this
3414                          * is a snapshot deletion, we should find the root
3415                          * in the dead_roots list
3416                          */
3417                         spin_lock(&fs_info->trans_lock);
3418                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3419                                             root_list) {
3420                                 if (dead_root->root_key.objectid ==
3421                                     found_key.objectid) {
3422                                         is_dead_root = 1;
3423                                         break;
3424                                 }
3425                         }
3426                         spin_unlock(&fs_info->trans_lock);
3427                         if (is_dead_root) {
3428                                 /* prevent this orphan from being found again */
3429                                 key.offset = found_key.objectid - 1;
3430                                 continue;
3431                         }
3432 
3433                 }
3434 
3435                 /*
3436                  * If we have an inode with links, there are a couple of
3437                  * possibilities. Old kernels (before v3.12) used to create an
3438                  * orphan item for truncate indicating that there were possibly
3439                  * extent items past i_size that needed to be deleted. In v3.12,
3440                  * truncate was changed to update i_size in sync with the extent
3441                  * items, but the (useless) orphan item was still created. Since
3442                  * v4.18, we don't create the orphan item for truncate at all.
3443                  *
3444                  * So, this item could mean that we need to do a truncate, but
3445                  * only if this filesystem was last used on a pre-v3.12 kernel
3446                  * and was not cleanly unmounted. The odds of that are quite
3447                  * slim, and it's a pain to do the truncate now, so just delete
3448                  * the orphan item.
3449                  *
3450                  * It's also possible that this orphan item was supposed to be
3451                  * deleted but wasn't. The inode number may have been reused,
3452                  * but either way, we can delete the orphan item.
3453                  */
3454                 if (ret == -ENOENT || inode->i_nlink) {
3455                         if (!ret)
3456                                 iput(inode);
3457                         trans = btrfs_start_transaction(root, 1);
3458                         if (IS_ERR(trans)) {
3459                                 ret = PTR_ERR(trans);
3460                                 goto out;
3461                         }
3462                         btrfs_debug(fs_info, "auto deleting %Lu",
3463                                     found_key.objectid);
3464                         ret = btrfs_del_orphan_item(trans, root,
3465                                                     found_key.objectid);
3466                         btrfs_end_transaction(trans);
3467                         if (ret)
3468                                 goto out;
3469                         continue;
3470                 }
3471 
3472                 nr_unlink++;
3473 
3474                 /* this will do delete_inode and everything for us */
3475                 iput(inode);
3476         }
3477         /* release the path since we're done with it */
3478         btrfs_release_path(path);
3479 
3480         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3481 
3482         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3483                 trans = btrfs_join_transaction(root);
3484                 if (!IS_ERR(trans))
3485                         btrfs_end_transaction(trans);
3486         }
3487 
3488         if (nr_unlink)
3489                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3490 
3491 out:
3492         if (ret)
3493                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3494         btrfs_free_path(path);
3495         return ret;
3496 }
3497 
3498 /*
3499  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3500  * don't find any xattrs, we know there can't be any acls.
3501  *
3502  * slot is the slot the inode is in, objectid is the objectid of the inode
3503  */
3504 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3505                                           int slot, u64 objectid,
3506                                           int *first_xattr_slot)
3507 {
3508         u32 nritems = btrfs_header_nritems(leaf);
3509         struct btrfs_key found_key;
3510         static u64 xattr_access = 0;
3511         static u64 xattr_default = 0;
3512         int scanned = 0;
3513 
3514         if (!xattr_access) {
3515                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3516                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3517                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3518                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3519         }
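             /*
              * The hashes are constants, so the unlocked lazy init above is
              * racy but harmless: concurrent callers all store the same
              * values.
              */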
3520 
3521         slot++;
3522         *first_xattr_slot = -1;
3523         while (slot < nritems) {
3524                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3525 
3526                 /* we found a different objectid, there must not be acls */
3527                 if (found_key.objectid != objectid)
3528                         return 0;
3529 
3530                 /* we found an xattr, assume we've got an acl */
3531                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3532                         if (*first_xattr_slot == -1)
3533                                 *first_xattr_slot = slot;
3534                         if (found_key.offset == xattr_access ||
3535                             found_key.offset == xattr_default)
3536                                 return 1;
3537                 }
3538 
3539                 /*
3540                  * we found a key greater than an xattr key, there can't
3541                  * be any acls later on
3542                  */
3543                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3544                         return 0;
3545 
3546                 slot++;
3547                 scanned++;
3548 
3549                 /*
3550                  * it goes inode, inode backrefs, xattrs, extents,
3551                  * so if there are a ton of hard links to an inode there can
3552                  * be a lot of backrefs.  Don't waste time searching too hard,
3553                  * this is just an optimization
3554                  */
3555                 if (scanned >= 8)
3556                         break;
3557         }
3558         /* we hit the end of the leaf before we found an xattr or
3559          * something larger than an xattr.  We have to assume the inode
3560          * has acls
3561          */
3562         if (*first_xattr_slot == -1)
3563                 *first_xattr_slot = slot;
3564         return 1;
3565 }
3566 
3567 /*
3568  * read an inode from the btree into the in-memory inode
3569  */
3570 static int btrfs_read_locked_inode(struct inode *inode,
3571                                    struct btrfs_path *in_path)
3572 {
3573         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3574         struct btrfs_path *path = in_path;
3575         struct extent_buffer *leaf;
3576         struct btrfs_inode_item *inode_item;
3577         struct btrfs_root *root = BTRFS_I(inode)->root;
3578         struct btrfs_key location;
3579         unsigned long ptr;
3580         int maybe_acls;
3581         u32 rdev;
3582         int ret;
3583         bool filled = false;
3584         int first_xattr_slot;
3585 
3586         ret = btrfs_fill_inode(inode, &rdev);
3587         if (!ret)
3588                 filled = true;
3589 
3590         if (!path) {
3591                 path = btrfs_alloc_path();
3592                 if (!path)
3593                         return -ENOMEM;
3594         }
3595 
3596         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3597 
3598         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3599         if (ret) {
3600                 if (path != in_path)
3601                         btrfs_free_path(path);
3602                 return ret;
3603         }
3604 
3605         leaf = path->nodes[0];
3606 
3607         if (filled)
3608                 goto cache_index;
3609 
3610         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3611                                     struct btrfs_inode_item);
3612         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3613         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3614         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3615         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3616         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3617 
3618         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3619         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3620 
3621         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3622         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3623 
3624         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3625         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3626 
3627         BTRFS_I(inode)->i_otime.tv_sec =
3628                 btrfs_timespec_sec(leaf, &inode_item->otime);
3629         BTRFS_I(inode)->i_otime.tv_nsec =
3630                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3631 
3632         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3633         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3634         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3635 
3636         inode_set_iversion_queried(inode,
3637                                    btrfs_inode_sequence(leaf, inode_item));
3638         inode->i_generation = BTRFS_I(inode)->generation;
3639         inode->i_rdev = 0;
3640         rdev = btrfs_inode_rdev(leaf, inode_item);
3641 
3642         BTRFS_I(inode)->index_cnt = (u64)-1;
3643         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3644 
3645 cache_index:
3646         /*
3647          * If we were modified in the current generation and evicted from memory
3648          * and then re-read we need to do a full sync since we don't have any
3649          * idea about which extents were modified before we were evicted from
3650          * cache.
3651          *
3652          * This is required for both inode re-read from disk and delayed inode
3653          * in delayed_nodes_tree.
3654          */
3655         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3656                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3657                         &BTRFS_I(inode)->runtime_flags);
3658 
3659         /*
3660          * We don't persist the id of the transaction where an unlink operation
3661          * against the inode was last made. So here we assume the inode might
3662          * have been evicted, and therefore the exact value of last_unlink_trans
3663          * was lost, and set it to last_trans to avoid metadata inconsistencies
3664          * between the inode and its parent if the inode is fsync'ed and the log
3665          * replayed. For example, in the scenario:
3666          *
3667          * touch mydir/foo
3668          * ln mydir/foo mydir/bar
3669          * sync
3670          * unlink mydir/bar
3671          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3672          * xfs_io -c fsync mydir/foo
3673          * <power failure>
3674          * mount fs, triggers fsync log replay
3675          *
3676          * We must make sure that when we fsync our inode foo we also log its
3677          * parent inode, otherwise after log replay the parent still has the
3678          * dentry with the "bar" name but our inode foo has a link count of 1
3679          * and doesn't have an inode ref with the name "bar" anymore.
3680          *
3681          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3682          * but it guarantees correctness at the expense of occasional full
3683          * transaction commits on fsync if our inode is a directory, or if our
3684          * inode is not a directory, logging its parent unnecessarily.
3685          */
3686         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3687         /*
3688          * Similar reasoning applies to last_link_trans; it needs to be set,
3689          * since otherwise a case like the following:
3690          *
3691          * mkdir A
3692          * touch foo
3693          * ln foo A/bar
3694          * echo 2 > /proc/sys/vm/drop_caches
3695          * fsync foo
3696          * <power failure>
3697          *
3698          * Would result in link bar and directory A not existing after the power
3699          * failure.
3700          */
3701         BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3702 
3703         path->slots[0]++;
3704         if (inode->i_nlink != 1 ||
3705             path->slots[0] >= btrfs_header_nritems(leaf))
3706                 goto cache_acl;
3707 
3708         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3709         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3710                 goto cache_acl;
3711 
3712         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3713         if (location.type == BTRFS_INODE_REF_KEY) {
3714                 struct btrfs_inode_ref *ref;
3715 
3716                 ref = (struct btrfs_inode_ref *)ptr;
3717                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3718         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3719                 struct btrfs_inode_extref *extref;
3720 
3721                 extref = (struct btrfs_inode_extref *)ptr;
3722                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3723                                                                      extref);
3724         }
3725 cache_acl:
3726         /*
3727          * try to precache a NULL acl entry for files that don't have
3728          * any xattrs or acls
3729          */
3730         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3731                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3732         if (first_xattr_slot != -1) {
3733                 path->slots[0] = first_xattr_slot;
3734                 ret = btrfs_load_inode_props(inode, path);
3735                 if (ret)
3736                         btrfs_err(fs_info,
3737                                   "error loading props for ino %llu (root %llu): %d",
3738                                   btrfs_ino(BTRFS_I(inode)),
3739                                   root->root_key.objectid, ret);
3740         }
3741         if (path != in_path)
3742                 btrfs_free_path(path);
3743 
3744         if (!maybe_acls)
3745                 cache_no_acl(inode);
3746 
3747         switch (inode->i_mode & S_IFMT) {
3748         case S_IFREG:
3749                 inode->i_mapping->a_ops = &btrfs_aops;
3750                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3751                 inode->i_fop = &btrfs_file_operations;
3752                 inode->i_op = &btrfs_file_inode_operations;
3753                 break;
3754         case S_IFDIR:
3755                 inode->i_fop = &btrfs_dir_file_operations;
3756                 inode->i_op = &btrfs_dir_inode_operations;
3757                 break;
3758         case S_IFLNK:
3759                 inode->i_op = &btrfs_symlink_inode_operations;
3760                 inode_nohighmem(inode);
3761                 inode->i_mapping->a_ops = &btrfs_aops;
3762                 break;
3763         default:
3764                 inode->i_op = &btrfs_special_inode_operations;
3765                 init_special_inode(inode, inode->i_mode, rdev);
3766                 break;
3767         }
3768 
3769         btrfs_sync_inode_flags_to_i_flags(inode);
3770         return 0;
3771 }
3772 
3773 /*
3774  * given a leaf and an inode, copy the inode fields into the leaf
3775  */
3776 static void fill_inode_item(struct btrfs_trans_handle *trans,
3777                             struct extent_buffer *leaf,
3778                             struct btrfs_inode_item *item,
3779                             struct inode *inode)
3780 {
3781         struct btrfs_map_token token;
3782 
3783         btrfs_init_map_token(&token);
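             /*
              * The map token caches the mapped extent buffer page across the
              * set_token calls below so that each field store does not have
              * to remap the buffer.
              */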
3784 
3785         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3786         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3787         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3788                                    &token);
3789         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3790         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3791 
3792         btrfs_set_token_timespec_sec(leaf, &item->atime,
3793                                      inode->i_atime.tv_sec, &token);
3794         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3795                                       inode->i_atime.tv_nsec, &token);
3796 
3797         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3798                                      inode->i_mtime.tv_sec, &token);
3799         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3800                                       inode->i_mtime.tv_nsec, &token);
3801 
3802         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3803                                      inode->i_ctime.tv_sec, &token);
3804         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3805                                       inode->i_ctime.tv_nsec, &token);
3806 
3807         btrfs_set_token_timespec_sec(leaf, &item->otime,
3808                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3809         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3810                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3811 
3812         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3813                                      &token);
3814         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3815                                          &token);
3816         btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
3817                                        &token);
3818         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3819         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3820         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3821         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3822 }
3823 
3824 /*
3825  * copy everything in the in-memory inode into the btree.
3826  */
3827 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3828                                 struct btrfs_root *root, struct inode *inode)
3829 {
3830         struct btrfs_inode_item *inode_item;
3831         struct btrfs_path *path;
3832         struct extent_buffer *leaf;
3833         int ret;
3834 
3835         path = btrfs_alloc_path();
3836         if (!path)
3837                 return -ENOMEM;
3838 
3839         path->leave_spinning = 1;
3840         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3841                                  1);
3842         if (ret) {
3843                 if (ret > 0)
3844                         ret = -ENOENT;
3845                 goto failed;
3846         }
3847 
3848         leaf = path->nodes[0];
3849         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3850                                     struct btrfs_inode_item);
3851 
3852         fill_inode_item(trans, leaf, inode_item, inode);
3853         btrfs_mark_buffer_dirty(leaf);
3854         btrfs_set_inode_last_trans(trans, inode);
3855         ret = 0;
3856 failed:
3857         btrfs_free_path(path);
3858         return ret;
3859 }
3860 
3861 /*
3862  * copy everything in the in-memory inode into the btree.
3863  */
3864 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3865                                 struct btrfs_root *root, struct inode *inode)
3866 {
3867         struct btrfs_fs_info *fs_info = root->fs_info;
3868         int ret;
3869 
3870         /*
3871          * If the inode is a free space inode, we can deadlock during commit
3872          * if we put it into the delayed code.
3873          *
3874          * The data relocation inode should also be directly updated
3875          * without delay.
3876          */
3877         if (!btrfs_is_free_space_inode(BTRFS_I(inode))
3878             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3879             && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3880                 btrfs_update_root_times(trans, root);
3881 
3882                 ret = btrfs_delayed_update_inode(trans, root, inode);
3883                 if (!ret)
3884                         btrfs_set_inode_last_trans(trans, inode);
3885                 return ret;
3886         }
3887 
3888         return btrfs_update_inode_item(trans, root, inode);
3889 }
3890 
3891 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3892                                          struct btrfs_root *root,
3893                                          struct inode *inode)
3894 {
3895         int ret;
3896 
3897         ret = btrfs_update_inode(trans, root, inode);
3898         if (ret == -ENOSPC)
3899                 return btrfs_update_inode_item(trans, root, inode);
3900         return ret;
3901 }
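
     /*
      * Illustrative sketch, not part of the original file: a typical caller
      * updates the inode item from inside a short-lived transaction.  The
      * fallback variant above exists for callers that cannot tolerate a
      * -ENOSPC from the delayed-items path and prefer to write the item
      * directly.  demo_touch_ctime is a hypothetical helper.
      */
     static inline int demo_touch_ctime(struct btrfs_root *root,
                                        struct inode *inode)
     {
             struct btrfs_trans_handle *trans;
             int ret;

             /* One metadata unit: only the inode item gets dirtied. */
             trans = btrfs_start_transaction(root, 1);
             if (IS_ERR(trans))
                     return PTR_ERR(trans);

             inode->i_ctime = current_time(inode);
             inode_inc_iversion(inode);
             ret = btrfs_update_inode(trans, root, inode);
             btrfs_end_transaction(trans);
             return ret;
     }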
3902 
3903 /*
3904  * unlink helper that gets used here in inode.c and in the tree logging
3905  * recovery code.  It removes a link in a directory with a given name, and
3906  * also drops the back refs from the inode to the directory.
3907  */
3908 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3909                                 struct btrfs_root *root,
3910                                 struct btrfs_inode *dir,
3911                                 struct btrfs_inode *inode,
3912                                 const char *name, int name_len)
3913 {
3914         struct btrfs_fs_info *fs_info = root->fs_info;
3915         struct btrfs_path *path;
3916         int ret = 0;
3917         struct extent_buffer *leaf;
3918         struct btrfs_dir_item *di;
3919         struct btrfs_key key;
3920         u64 index;
3921         u64 ino = btrfs_ino(inode);
3922         u64 dir_ino = btrfs_ino(dir);
3923 
3924         path = btrfs_alloc_path();
3925         if (!path) {
3926                 ret = -ENOMEM;
3927                 goto out;
3928         }
3929 
3930         path->leave_spinning = 1;
3931         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3932                                     name, name_len, -1);
3933         if (IS_ERR_OR_NULL(di)) {
3934                 ret = di ? PTR_ERR(di) : -ENOENT;
3935                 goto err;
3936         }
3937         leaf = path->nodes[0];
3938         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3939         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3940         if (ret)
3941                 goto err;
3942         btrfs_release_path(path);
3943 
3944         /*
3945          * If we don't have a cached dir index, we have to get it by
3946          * looking up the inode ref.  Since that lookup fetches the inode
3947          * ref anyway, remove it directly; delayed deletion gains nothing.
3948          *
3949          * But if we do have the dir index cached, there is no need to
3950          * search for the inode ref.  The ref sits close to the inode item,
3951          * so it is better to delay its deletion and carry it out when we
3952          * update the inode item.
3953          */
3954         if (inode->dir_index) {
3955                 ret = btrfs_delayed_delete_inode_ref(inode);
3956                 if (!ret) {
3957                         index = inode->dir_index;
3958                         goto skip_backref;
3959                 }
3960         }
3961 
3962         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3963                                   dir_ino, &index);
3964         if (ret) {
3965                 btrfs_info(fs_info,
3966                         "failed to delete reference to %.*s, inode %llu parent %llu",
3967                         name_len, name, ino, dir_ino);
3968                 btrfs_abort_transaction(trans, ret);
3969                 goto err;
3970         }
3971 skip_backref:
3972         ret = btrfs_delete_delayed_dir_index(trans, dir, index);
3973         if (ret) {
3974                 btrfs_abort_transaction(trans, ret);
3975                 goto err;
3976         }
3977 
3978         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3979                         dir_ino);
3980         if (ret != 0 && ret != -ENOENT) {
3981                 btrfs_abort_transaction(trans, ret);
3982                 goto err;
3983         }
3984 
3985         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3986                         index);
3987         if (ret == -ENOENT)
3988                 ret = 0;
3989         else if (ret)
3990                 btrfs_abort_transaction(trans, ret);
3991 err:
3992         btrfs_free_path(path);
3993         if (ret)
3994                 goto out;
3995 
3996         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
3997         inode_inc_iversion(&inode->vfs_inode);
3998         inode_inc_iversion(&dir->vfs_inode);
3999         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4000                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4001         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
4002 out:
4003         return ret;
4004 }
4005 
4006 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4007                        struct btrfs_root *root,
4008                        struct btrfs_inode *dir, struct btrfs_inode *inode,
4009                        const char *name, int name_len)
4010 {
4011         int ret;
4012         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4013         if (!ret) {
4014                 drop_nlink(&inode->vfs_inode);
4015                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4016         }
4017         return ret;
4018 }
4019 
4020 /*
4021  * helper to start transaction for unlink and rmdir.
4022  *
4023  * unlink and rmdir are special in btrfs: they do not always free space, so
4024  * if we cannot make our reservation the normal way, try to see if there is
4025  * enough slack room in the global reserve to migrate from; otherwise we
4026  * cannot allow the unlink to occur.
4027  */
4028 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4029 {
4030         struct btrfs_root *root = BTRFS_I(dir)->root;
4031 
4032         /*
4033          * 1 for the possible orphan item
4034          * 1 for the dir item
4035          * 1 for the dir index
4036          * 1 for the inode ref
4037          * 1 for the inode
4038          */
4039         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4040 }
4041 
4042 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4043 {
4044         struct btrfs_root *root = BTRFS_I(dir)->root;
4045         struct btrfs_trans_handle *trans;
4046         struct inode *inode = d_inode(dentry);
4047         int ret;
4048 
4049         trans = __unlink_start_trans(dir);
4050         if (IS_ERR(trans))
4051                 return PTR_ERR(trans);
4052 
4053         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4054                         0);
4055 
4056         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4057                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4058                         dentry->d_name.len);
4059         if (ret)
4060                 goto out;
4061 
4062         if (inode->i_nlink == 0) {
4063                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4064                 if (ret)
4065                         goto out;
4066         }
4067 
4068 out:
4069         btrfs_end_transaction(trans);
4070         btrfs_btree_balance_dirty(root->fs_info);
4071         return ret;
4072 }
4073 
4074 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4075                                struct inode *dir, u64 objectid,
4076                                const char *name, int name_len)
4077 {
4078         struct btrfs_root *root = BTRFS_I(dir)->root;
4079         struct btrfs_path *path;
4080         struct extent_buffer *leaf;
4081         struct btrfs_dir_item *di;
4082         struct btrfs_key key;
4083         u64 index;
4084         int ret;
4085         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4086 
4087         path = btrfs_alloc_path();
4088         if (!path)
4089                 return -ENOMEM;
4090 
4091         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4092                                    name, name_len, -1);
4093         if (IS_ERR_OR_NULL(di)) {
4094                 ret = di ? PTR_ERR(di) : -ENOENT;
4095                 goto out;
4096         }
4097 
4098         leaf = path->nodes[0];
4099         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4100         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4101         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4102         if (ret) {
4103                 btrfs_abort_transaction(trans, ret);
4104                 goto out;
4105         }
4106         btrfs_release_path(path);
4107 
4108         ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
4109                                  dir_ino, &index, name, name_len);
4110         if (ret < 0) {
4111                 if (ret != -ENOENT) {
4112                         btrfs_abort_transaction(trans, ret);
4113                         goto out;
4114                 }
4115                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4116                                                  name, name_len);
4117                 if (IS_ERR_OR_NULL(di)) {
4118                         if (!di)
4119                                 ret = -ENOENT;
4120                         else
4121                                 ret = PTR_ERR(di);
4122                         btrfs_abort_transaction(trans, ret);
4123                         goto out;
4124                 }
4125 
4126                 leaf = path->nodes[0];
4127                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4128                 index = key.offset;
4129         }
4130         btrfs_release_path(path);
4131 
4132         ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4133         if (ret) {
4134                 btrfs_abort_transaction(trans, ret);
4135                 goto out;
4136         }
4137 
4138         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4139         inode_inc_iversion(dir);
4140         dir->i_mtime = dir->i_ctime = current_time(dir);
4141         ret = btrfs_update_inode_fallback(trans, root, dir);
4142         if (ret)
4143                 btrfs_abort_transaction(trans, ret);
4144 out:
4145         btrfs_free_path(path);
4146         return ret;
4147 }
4148 
4149 /*
4150  * Helper to check if the subvolume references other subvolumes or if it
4151  * is the default subvolume.
4152  */
4153 static noinline int may_destroy_subvol(struct btrfs_root *root)
4154 {
4155         struct btrfs_fs_info *fs_info = root->fs_info;
4156         struct btrfs_path *path;
4157         struct btrfs_dir_item *di;
4158         struct btrfs_key key;
4159         u64 dir_id;
4160         int ret;
4161 
4162         path = btrfs_alloc_path();
4163         if (!path)
4164                 return -ENOMEM;
4165 
4166         /* Make sure this root isn't set as the default subvol */
4167         dir_id = btrfs_super_root_dir(fs_info->super_copy);
4168         di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4169                                    dir_id, "default", 7, 0);
4170         if (di && !IS_ERR(di)) {
4171                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4172                 if (key.objectid == root->root_key.objectid) {
4173                         ret = -EPERM;
4174                         btrfs_err(fs_info,
4175                                   "deleting default subvolume %llu is not allowed",
4176                                   key.objectid);
4177                         goto out;
4178                 }
4179                 btrfs_release_path(path);
4180         }
4181 
4182         key.objectid = root->root_key.objectid;
4183         key.type = BTRFS_ROOT_REF_KEY;
4184         key.offset = (u64)-1;
4185 
4186         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4187         if (ret < 0)
4188                 goto out;
4189         BUG_ON(ret == 0);
4190 
4191         ret = 0;
4192         if (path->slots[0] > 0) {
4193                 path->slots[0]--;
4194                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4195                 if (key.objectid == root->root_key.objectid &&
4196                     key.type == BTRFS_ROOT_REF_KEY)
4197                         ret = -ENOTEMPTY;
4198         }
4199 out:
4200         btrfs_free_path(path);
4201         return ret;
4202 }
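
     /*
      * Illustrative sketch, not part of the original file: the (u64)-1
      * offset used above is the generic way to locate the highest existing
      * key of a given objectid/type.  An exact match is impossible, so
      * btrfs_search_slot() returns > 0 positioned just past the last
      * candidate, and stepping one slot back lands on it if it exists.
      * demo_find_highest is a hypothetical helper; the caller owns 'path'
      * and must release it, and a full version would step to the previous
      * leaf when slots[0] is 0.
      */
     static inline int demo_find_highest(struct btrfs_root *root,
                                         struct btrfs_path *path,
                                         u64 objectid, u8 type,
                                         struct btrfs_key *found)
     {
             struct btrfs_key key;
             int ret;

             key.objectid = objectid;
             key.type = type;
             key.offset = (u64)-1;

             ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
             if (ret < 0)
                     return ret;
             if (path->slots[0] == 0)
                     return 1;       /* this leaf has nothing at or below the key */
             path->slots[0]--;
             btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
             if (found->objectid != objectid || found->type != type)
                     return 1;       /* highest key belongs to something else */
             return 0;
     }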
4203 
4204 /* Delete all dentries for inodes belonging to the root */
4205 static void btrfs_prune_dentries(struct btrfs_root *root)
4206 {
4207         struct btrfs_fs_info *fs_info = root->fs_info;
4208         struct rb_node *node;
4209         struct rb_node *prev;
4210         struct btrfs_inode *entry;
4211         struct inode *inode;
4212         u64 objectid = 0;
4213 
4214         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4215                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4216 
4217         spin_lock(&root->inode_lock);
4218 again:
4219         node = root->inode_tree.rb_node;
4220         prev = NULL;
4221         while (node) {
4222                 prev = node;
4223                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4224 
4225                 if (objectid < btrfs_ino(entry))
4226                         node = node->rb_left;
4227                 else if (objectid > btrfs_ino(entry))
4228                         node = node->rb_right;
4229                 else
4230                         break;
4231         }
4232         if (!node) {
4233                 while (prev) {
4234                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
4235                         if (objectid <= btrfs_ino(entry)) {
4236                                 node = prev;
4237                                 break;
4238                         }
4239                         prev = rb_next(prev);
4240                 }
4241         }
4242         while (node) {
4243                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4244                 objectid = btrfs_ino(entry) + 1;
4245                 inode = igrab(&entry->vfs_inode);
4246                 if (inode) {
4247                         spin_unlock(&root->inode_lock);
4248                         if (atomic_read(&inode->i_count) > 1)
4249                                 d_prune_aliases(inode);
4250                         /*
4251                          * btrfs_drop_inode will have it removed from the inode
4252                          * cache when its usage count hits zero.
4253                          */
4254                         iput(inode);
4255                         cond_resched();
4256                         spin_lock(&root->inode_lock);
4257                         goto again;
4258                 }
4259 
4260                 if (cond_resched_lock(&root->inode_lock))
4261                         goto again;
4262 
4263                 node = rb_next(node);
4264         }
4265         spin_unlock(&root->inode_lock);
4266 }
4267 
4268 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4269 {
4270         struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4271         struct btrfs_root *root = BTRFS_I(dir)->root;
4272         struct inode *inode = d_inode(dentry);
4273         struct btrfs_root *dest = BTRFS_I(inode)->root;
4274         struct btrfs_trans_handle *trans;
4275         struct btrfs_block_rsv block_rsv;
4276         u64 root_flags;
4277         int ret;
4278         int err;
4279 
4280         /*
4281          * Don't allow deleting a subvolume while a send is in progress.
4282          * This check runs inside the inode lock, so the error handling
4283          * that has to drop the bit again is not run concurrently.
4284          */
4285         spin_lock(&dest->root_item_lock);
4286         if (dest->send_in_progress) {
4287                 spin_unlock(&dest->root_item_lock);
4288                 btrfs_warn(fs_info,
4289                            "attempt to delete subvolume %llu during send",
4290                            dest->root_key.objectid);
4291                 return -EPERM;
4292         }
4293         root_flags = btrfs_root_flags(&dest->root_item);
4294         btrfs_set_root_flags(&dest->root_item,
4295                              root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4296         spin_unlock(&dest->root_item_lock);
4297 
4298         down_write(&fs_info->subvol_sem);
4299 
4300         err = may_destroy_subvol(dest);
4301         if (err)
4302                 goto out_up_write;
4303 
4304         btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4305         /*
4306          * One for dir inode,
4307          * two for dir entries,
4308          * two for root ref/backref.
4309          */
4310         err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4311         if (err)
4312                 goto out_up_write;
4313 
4314         trans = btrfs_start_transaction(root, 0);
4315         if (IS_ERR(trans)) {
4316                 err = PTR_ERR(trans);
4317                 goto out_release;
4318         }
4319         trans->block_rsv = &block_rsv;
4320         trans->bytes_reserved = block_rsv.size;
4321 
4322         btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4323 
4324         ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
4325                                   dentry->d_name.name, dentry->d_name.len);
4326         if (ret) {
4327                 err = ret;
4328                 btrfs_abort_transaction(trans, ret);
4329                 goto out_end_trans;
4330         }
4331 
4332         btrfs_record_root_in_trans(trans, dest);
4333 
4334         memset(&dest->root_item.drop_progress, 0,
4335                 sizeof(dest->root_item.drop_progress));
4336         dest->root_item.drop_level = 0;
4337         btrfs_set_root_refs(&dest->root_item, 0);
4338 
4339         if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4340                 ret = btrfs_insert_orphan_item(trans,
4341                                         fs_info->tree_root,
4342                                         dest->root_key.objectid);
4343                 if (ret) {
4344                         btrfs_abort_transaction(trans, ret);
4345                         err = ret;
4346                         goto out_end_trans;
4347                 }
4348         }
4349 
4350         ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4351                                   BTRFS_UUID_KEY_SUBVOL,
4352                                   dest->root_key.objectid);
4353         if (ret && ret != -ENOENT) {
4354                 btrfs_abort_transaction(trans, ret);
4355                 err = ret;
4356                 goto out_end_trans;
4357         }
4358         if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4359                 ret = btrfs_uuid_tree_remove(trans,
4360                                           dest->root_item.received_uuid,
4361                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4362                                           dest->root_key.objectid);
4363                 if (ret && ret != -ENOENT) {
4364                         btrfs_abort_transaction(trans, ret);
4365                         err = ret;
4366                         goto out_end_trans;
4367                 }
4368         }
4369 
4370 out_end_trans:
4371         trans->block_rsv = NULL;
4372         trans->bytes_reserved = 0;
4373         ret = btrfs_end_transaction(trans);
4374         if (ret && !err)
4375                 err = ret;
4376         inode->i_flags |= S_DEAD;
4377 out_release:
4378         btrfs_subvolume_release_metadata(fs_info, &block_rsv);
4379 out_up_write:
4380         up_write(&fs_info->subvol_sem);
4381         if (err) {
4382                 spin_lock(&dest->root_item_lock);
4383                 root_flags = btrfs_root_flags(&dest->root_item);
4384                 btrfs_set_root_flags(&dest->root_item,
4385                                 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4386                 spin_unlock(&dest->root_item_lock);
4387         } else {
4388                 d_invalidate(dentry);
4389                 btrfs_prune_dentries(dest);
4390                 ASSERT(dest->send_in_progress == 0);
4391 
4392                 /* the last ref */
4393                 if (dest->ino_cache_inode) {
4394                         iput(dest->ino_cache_inode);
4395                         dest->ino_cache_inode = NULL;
4396                 }
4397         }
4398 
4399         return err;
4400 }
4401 
4402 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4403 {
4404         struct inode *inode = d_inode(dentry);
4405         int err = 0;
4406         struct btrfs_root *root = BTRFS_I(dir)->root;
4407         struct btrfs_trans_handle *trans;
4408         u64 last_unlink_trans;
4409 
4410         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4411                 return -ENOTEMPTY;
4412         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4413                 return btrfs_delete_subvolume(dir, dentry);
4414 
4415         trans = __unlink_start_trans(dir);
4416         if (IS_ERR(trans))
4417                 return PTR_ERR(trans);
4418 
4419         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4420                 err = btrfs_unlink_subvol(trans, dir,
4421                                           BTRFS_I(inode)->location.objectid,
4422                                           dentry->d_name.name,
4423                                           dentry->d_name.len);
4424                 goto out;
4425         }
4426 
4427         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4428         if (err)
4429                 goto out;
4430 
4431         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4432 
4433         /* now the directory is empty */
4434         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4435                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4436                         dentry->d_name.len);
4437         if (!err) {
4438                 btrfs_i_size_write(BTRFS_I(inode), 0);
4439                 /*
4440                  * Propagate the last_unlink_trans value of the deleted dir to
4441                  * its parent directory. This is to prevent an unrecoverable
4442                  * log tree in the case we do something like this:
4443                  * 1) create dir foo
4444                  * 2) create snapshot under dir foo
4445                  * 3) delete the snapshot
4446                  * 4) rmdir foo
4447                  * 5) mkdir foo
4448                  * 6) fsync foo or some file inside foo
4449                  */
4450                 if (last_unlink_trans >= trans->transid)
4451                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4452         }
4453 out:
4454         btrfs_end_transaction(trans);
4455         btrfs_btree_balance_dirty(root->fs_info);
4456 
4457         return err;
4458 }
4459 
4460 static int truncate_space_check(struct btrfs_trans_handle *trans,
4461                                 struct btrfs_root *root,
4462                                 u64 bytes_deleted)
4463 {
4464         struct btrfs_fs_info *fs_info = root->fs_info;
4465         int ret;
4466 
4467         /*
4468          * This is only used to apply pressure to the enospc system; we
4469          * don't intend to use this reservation at all.
4470          */
4471         bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4472         bytes_deleted *= fs_info->nodesize;
4473         ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4474                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4475         if (!ret) {
4476                 trace_btrfs_space_reservation(fs_info, "transaction",
4477                                               trans->transid,
4478                                               bytes_deleted, 1);
4479                 trans->bytes_reserved += bytes_deleted;
4480         }
4481         return ret;
4482 
4483 }
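
     /*
      * Worked example for the reservation above, with illustrative numbers:
      * if btrfs_csum_bytes_to_leaves() reports that the deleted range's
      * checksums span 2 leaves and the nodesize is 16K, we attempt to
      * reserve 2 * 16K = 32K with BTRFS_RESERVE_NO_FLUSH.  A failure is
      * not fatal; it just signals that the enospc machinery is under
      * pressure, and the caller reacts by ending the transaction early.
      */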
4484 
4485 /*
4486  * Return this if we need to call truncate_block for the last bit of the
4487  * truncate.
4488  */
4489 #define NEED_TRUNCATE_BLOCK 1
4490 
4491 /*
4492  * This can truncate away extent items, csum items and directory items.
4493  * It starts at a high offset and removes keys until it can't find
4494  * any higher than new_size.
4495  *
4496  * csum items that cross the new i_size are truncated to the new size
4497  * as well.
4498  *
4499  * min_type is the minimum key type to truncate down to.  If set to 0, this
4500  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4501  */
4502 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4503                                struct btrfs_root *root,
4504                                struct inode *inode,
4505                                u64 new_size, u32 min_type)
4506 {
4507         struct btrfs_fs_info *fs_info = root->fs_info;
4508         struct btrfs_path *path;
4509         struct extent_buffer *leaf;
4510         struct btrfs_file_extent_item *fi;
4511         struct btrfs_key key;
4512         struct btrfs_key found_key;
4513         u64 extent_start = 0;
4514         u64 extent_num_bytes = 0;
4515         u64 extent_offset = 0;
4516         u64 item_end = 0;
4517         u64 last_size = new_size;
4518         u32 found_type = (u8)-1;
4519         int found_extent;
4520         int del_item;
4521         int pending_del_nr = 0;
4522         int pending_del_slot = 0;
4523         int extent_type = -1;
4524         int ret;
4525         u64 ino = btrfs_ino(BTRFS_I(inode));
4526         u64 bytes_deleted = 0;
4527         bool be_nice = false;
4528         bool should_throttle = false;
4529         bool should_end = false;
4530 
4531         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4532 
4533         /*
4534          * For non-free space inodes and reference-counted (REF_COWS) roots,
4535          * we want to back off from time to time.
4536          */
4537         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4538             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4539                 be_nice = true;
4540 
4541         path = btrfs_alloc_path();
4542         if (!path)
4543                 return -ENOMEM;
4544         path->reada = READA_BACK;
4545 
4546         /*
4547          * We want to drop from the next block forward in case this new size is
4548          * not block aligned since we will be keeping the last block of the
4549          * extent just the way it is.
4550          */
4551         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4552             root == fs_info->tree_root)
4553                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4554                                         fs_info->sectorsize),
4555                                         (u64)-1, 0);
4556 
4557         /*
4558          * This function is also used to drop the items in the log tree before
4559          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4560          * it is used to drop the logged items. So we shouldn't kill the
4561          * delayed items.
4562          */
4563         if (min_type == 0 && root == BTRFS_I(inode)->root)
4564                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4565 
4566         key.objectid = ino;
4567         key.offset = (u64)-1;
4568         key.type = (u8)-1;
4569 
4570 search_again:
4571         /*
4572          * With a 16K leaf size and 128MB extents, you can actually queue
4573          * up a huge file in a single leaf.  Most of the time that
4574          * bytes_deleted is > 0, it will be huge by the time we get here.
4575          */
4576         if (be_nice && bytes_deleted > SZ_32M &&
4577             btrfs_should_end_transaction(trans)) {
4578                 ret = -EAGAIN;
4579                 goto out;
4580         }
4581 
4582         path->leave_spinning = 1;
4583         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4584         if (ret < 0)
4585                 goto out;
4586 
4587         if (ret > 0) {
4588                 ret = 0;
4589                 /* there are no items in the tree for us to truncate, we're
4590                  * done
4591                  */
4592                 if (path->slots[0] == 0)
4593                         goto out;
4594                 path->slots[0]--;
4595         }
4596 
4597         while (1) {
4598                 fi = NULL;
4599                 leaf = path->nodes[0];
4600                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4601                 found_type = found_key.type;
4602 
4603                 if (found_key.objectid != ino)
4604                         break;
4605 
4606                 if (found_type < min_type)
4607                         break;
4608 
4609                 item_end = found_key.offset;
4610                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4611                         fi = btrfs_item_ptr(leaf, path->slots[0],
4612                                             struct btrfs_file_extent_item);
4613                         extent_type = btrfs_file_extent_type(leaf, fi);
4614                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4615                                 item_end +=
4616                                     btrfs_file_extent_num_bytes(leaf, fi);
4617 
4618                                 trace_btrfs_truncate_show_fi_regular(
4619                                         BTRFS_I(inode), leaf, fi,
4620                                         found_key.offset);
4621                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4622                                 item_end += btrfs_file_extent_ram_bytes(leaf,
4623                                                                         fi);
4624 
4625                                 trace_btrfs_truncate_show_fi_inline(
4626                                         BTRFS_I(inode), leaf, fi, path->slots[0],
4627                                         found_key.offset);
4628                         }
4629                         item_end--;
4630                 }
4631                 if (found_type > min_type) {
4632                         del_item = 1;
4633                 } else {
4634                         if (item_end < new_size)
4635                                 break;
4636                         if (found_key.offset >= new_size)
4637                                 del_item = 1;
4638                         else
4639                                 del_item = 0;
4640                 }
4641                 found_extent = 0;
4642                 /* FIXME, shrink the extent if the ref count is only 1 */
4643                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4644                         goto delete;
4645 
4646                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4647                         u64 num_dec;
4648                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4649                         if (!del_item) {
4650                                 u64 orig_num_bytes =
4651                                         btrfs_file_extent_num_bytes(leaf, fi);
4652                                 extent_num_bytes = ALIGN(new_size -
4653                                                 found_key.offset,
4654                                                 fs_info->sectorsize);
4655                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4656                                                          extent_num_bytes);
4657                                 num_dec = (orig_num_bytes -
4658                                            extent_num_bytes);
4659                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4660                                              &root->state) &&
4661                                     extent_start != 0)
4662                                         inode_sub_bytes(inode, num_dec);
4663                                 btrfs_mark_buffer_dirty(leaf);
4664                         } else {
4665                                 extent_num_bytes =
4666                                         btrfs_file_extent_disk_num_bytes(leaf,
4667                                                                          fi);
4668                                 extent_offset = found_key.offset -
4669                                         btrfs_file_extent_offset(leaf, fi);
4670 
4671                                 /* FIXME blocksize != 4096 */
4672                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4673                                 if (extent_start != 0) {
4674                                         found_extent = 1;
4675                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4676                                                      &root->state))
4677                                                 inode_sub_bytes(inode, num_dec);
4678                                 }
4679                         }
4680                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4681                         /*
4682                          * we can't truncate inline items that have had
4683                          * special encodings
4684                          */
4685                         if (!del_item &&
4686                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4687                             btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4688                             btrfs_file_extent_compression(leaf, fi) == 0) {
4689                                 u32 size = (u32)(new_size - found_key.offset);
4690 
4691                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4692                                 size = btrfs_file_extent_calc_inline_size(size);
4693                                 btrfs_truncate_item(root->fs_info, path, size, 1);
4694                         } else if (!del_item) {
4695                                 /*
4696                                  * We have to bail so the last_size is set to
4697                                  * just before this extent.
4698                                  */
4699                                 ret = NEED_TRUNCATE_BLOCK;
4700                                 break;
4701                         }
4702 
4703                         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4704                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4705                 }
4706 delete:
4707                 if (del_item)
4708                         last_size = found_key.offset;
4709                 else
4710                         last_size = new_size;
4711                 if (del_item) {
4712                         if (!pending_del_nr) {
4713                                 /* no pending yet, add ourselves */
4714                                 pending_del_slot = path->slots[0];
4715                                 pending_del_nr = 1;
4716                         } else if (pending_del_nr &&
4717                                    path->slots[0] + 1 == pending_del_slot) {
4718                                 /* hop on the pending chunk */
4719                                 pending_del_nr++;
4720                                 pending_del_slot = path->slots[0];
4721                         } else {
4722                                 BUG();
4723                         }
4724                 } else {
4725                         break;
4726                 }
4727                 should_throttle = false;
4728 
4729                 if (found_extent &&
4730                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4731                      root == fs_info->tree_root)) {
4732                         btrfs_set_path_blocking(path);
4733                         bytes_deleted += extent_num_bytes;
4734                         ret = btrfs_free_extent(trans, root, extent_start,
4735                                                 extent_num_bytes, 0,
4736                                                 btrfs_header_owner(leaf),
4737                                                 ino, extent_offset);
4738                         if (ret) {
4739                                 btrfs_abort_transaction(trans, ret);
4740                                 break;
4741                         }
4742                         if (btrfs_should_throttle_delayed_refs(trans))
4743                                 btrfs_async_run_delayed_refs(fs_info,
4744                                         trans->delayed_ref_updates * 2,
4745                                         trans->transid, 0);
4746                         if (be_nice) {
4747                                 if (truncate_space_check(trans, root,
4748                                                          extent_num_bytes)) {
4749                                         should_end = true;
4750                                 }
4751                                 if (btrfs_should_throttle_delayed_refs(trans))
4752                                         should_throttle = true;
4753                         }
4754                 }
4755 
4756                 if (found_type == BTRFS_INODE_ITEM_KEY)
4757                         break;
4758 
4759                 if (path->slots[0] == 0 ||
4760                     path->slots[0] != pending_del_slot ||
4761                     should_throttle || should_end) {
4762                         if (pending_del_nr) {
4763                                 ret = btrfs_del_items(trans, root, path,
4764                                                 pending_del_slot,
4765                                                 pending_del_nr);
4766                                 if (ret) {
4767                                         btrfs_abort_transaction(trans, ret);
4768                                         break;
4769                                 }
4770                                 pending_del_nr = 0;
4771                         }
4772                         btrfs_release_path(path);
4773                         if (should_throttle) {
4774                                 unsigned long updates = trans->delayed_ref_updates;
4775                                 if (updates) {
4776                                         trans->delayed_ref_updates = 0;
4777                                         ret = btrfs_run_delayed_refs(trans,
4778                                                                    updates * 2);
4779                                         if (ret)
4780                                                 break;
4781                                 }
4782                         }
4783                         /*
4784                          * if we failed to refill our space rsv, bail out
4785                          * and let the transaction restart
4786                          */
4787                         if (should_end) {
4788                                 ret = -EAGAIN;
4789                                 break;
4790                         }
4791                         goto search_again;
4792                 } else {
4793                         path->slots[0]--;
4794                 }
4795         }
4796 out:
4797         if (ret >= 0 && pending_del_nr) {
4798                 int err;
4799 
4800                 err = btrfs_del_items(trans, root, path, pending_del_slot,
4801                                       pending_del_nr);
4802                 if (err) {
4803                         btrfs_abort_transaction(trans, err);
4804                         ret = err;
4805                 }
4806         }
4807         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4808                 ASSERT(last_size >= new_size);
4809                 if (!ret && last_size > new_size)
4810                         last_size = new_size;
4811                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4812         }
4813 
4814         btrfs_free_path(path);
4815 
4816         if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
4817                 unsigned long updates = trans->delayed_ref_updates;
4818                 int err;
4819 
4820                 if (updates) {
4821                         trans->delayed_ref_updates = 0;
4822                         err = btrfs_run_delayed_refs(trans, updates * 2);
4823                         if (err)
4824                                 ret = err;
4825                 }
4826         }
4827         return ret;
4828 }
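
     /*
      * Illustrative sketch, not part of the original file: since the
      * function above returns -EAGAIN when it wants the transaction
      * restarted, callers typically loop, ending and restarting the handle
      * between passes.  demo_truncate_loop is a hypothetical, simplified
      * caller; the real truncate path also refills its space reservation
      * between passes and, on a NEED_TRUNCATE_BLOCK return, zeroes the
      * final partial block with btrfs_truncate_block().
      */
     static inline int demo_truncate_loop(struct btrfs_root *root,
                                          struct inode *inode, u64 new_size)
     {
             struct btrfs_trans_handle *trans;
             int ret;

             while (1) {
                     trans = btrfs_start_transaction(root, 1);
                     if (IS_ERR(trans))
                             return PTR_ERR(trans);
                     ret = btrfs_truncate_inode_items(trans, root, inode,
                                                      new_size,
                                                      BTRFS_EXTENT_DATA_KEY);
                     btrfs_end_transaction(trans);
                     if (ret != -EAGAIN)
                             break;
             }
             return ret;
     }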
4829 
4830 /*
4831  * btrfs_truncate_block - read, zero a chunk and write a block
4832  * @inode - inode that we're zeroing
4833  * @from - the offset to start zeroing
4834  * @len - the length to zero, 0 to zero the entire range relative to the
4835  *      offset
4836  * @front - zero up to the offset instead of from the offset on
4837  *
4838  * This will find the block for the "from" offset, COW it and zero the
4839  * part we want zeroed.  This is used with truncate and hole punching.
4840  */
4841 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4842                         int front)
4843 {
4844         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4845         struct address_space *mapping = inode->i_mapping;
4846         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4847         struct btrfs_ordered_extent *ordered;
4848         struct extent_state *cached_state = NULL;
4849         struct extent_changeset *data_reserved = NULL;
4850         char *kaddr;
4851         u32 blocksize = fs_info->sectorsize;
4852         pgoff_t index = from >> PAGE_SHIFT;
4853         unsigned offset = from & (blocksize - 1);
4854         struct page *page;
4855         gfp_t mask = btrfs_alloc_write_mask(mapping);
4856         int ret = 0;
4857         u64 block_start;
4858         u64 block_end;
4859 
4860         if (IS_ALIGNED(offset, blocksize) &&
4861             (!len || IS_ALIGNED(len, blocksize)))
4862                 goto out;
4863 
4864         block_start = round_down(from, blocksize);
4865         block_end = block_start + blocksize - 1;
4866 
4867         ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4868                                            block_start, blocksize);
4869         if (ret)
4870                 goto out;
4871 
4872 again:
4873         page = find_or_create_page(mapping, index, mask);
4874         if (!page) {
4875                 btrfs_delalloc_release_space(inode, data_reserved,
4876                                              block_start, blocksize, true);
4877                 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
4878                 ret = -ENOMEM;
4879                 goto out;
4880         }
4881 
4882         if (!PageUptodate(page)) {
4883                 ret = btrfs_readpage(NULL, page);
4884                 lock_page(page);
4885                 if (page->mapping != mapping) {
4886                         unlock_page(page);
4887                         put_page(page);
4888                         goto again;
4889                 }
4890                 if (!PageUptodate(page)) {
4891                         ret = -EIO;
4892                         goto out_unlock;
4893                 }
4894         }
4895         wait_on_page_writeback(page);
4896 
4897         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4898         set_page_extent_mapped(page);
4899 
4900         ordered = btrfs_lookup_ordered_extent(inode, block_start);
4901         if (ordered) {
4902                 unlock_extent_cached(io_tree, block_start, block_end,
4903                                      &cached_state);
4904                 unlock_page(page);
4905                 put_page(page);
4906                 btrfs_start_ordered_extent(inode, ordered, 1);
4907                 btrfs_put_ordered_extent(ordered);
4908                 goto again;
4909         }
4910 
4911         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4912                           EXTENT_DIRTY | EXTENT_DELALLOC |
4913                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4914                           0, 0, &cached_state);
4915 
4916         ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4917                                         &cached_state, 0);
4918         if (ret) {
4919                 unlock_extent_cached(io_tree, block_start, block_end,
4920                                      &cached_state);
4921                 goto out_unlock;
4922         }
4923 
4924         if (offset != blocksize) {
4925                 if (!len)
4926                         len = blocksize - offset;
4927                 kaddr = kmap(page);
4928                 if (front)
4929                         memset(kaddr + (block_start - page_offset(page)),
4930                                 0, offset);
4931                 else
4932                         memset(kaddr + (block_start - page_offset(page)) +  offset,
4933                                 0, len);
4934                 flush_dcache_page(page);
4935                 kunmap(page);
4936         }
4937         ClearPageChecked(page);
4938         set_page_dirty(page);
4939         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4940 
4941 out_unlock:
4942         if (ret)
4943                 btrfs_delalloc_release_space(inode, data_reserved, block_start,
4944                                              blocksize, true);
4945         btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
4946         unlock_page(page);
4947         put_page(page);
4948 out:
4949         extent_changeset_free(data_reserved);
4950         return ret;
4951 }
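
     /*
      * Illustrative sketch, not part of the original file: hole punching
      * has to zero the partial blocks at both ends of the range, the tail
      * of the first block from the start offset onward (front == 0) and
      * the head of the last block up to the end offset (front == 1).
      * demo_zero_partial_blocks is a hypothetical helper showing the two
      * calls.
      */
     static inline int demo_zero_partial_blocks(struct inode *inode,
                                                loff_t start, loff_t end)
     {
             int ret;

             /* Zero from 'start' to the end of its block. */
             ret = btrfs_truncate_block(inode, start, 0, 0);
             if (ret)
                     return ret;
             /* Zero from the start of end's block up to 'end'. */
             return btrfs_truncate_block(inode, end, 0, 1);
     }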
4952 
4953 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4954                              u64 offset, u64 len)
4955 {
4956         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4957         struct btrfs_trans_handle *trans;
4958         int ret;
4959 
4960         /*
4961          * Still need to make sure the inode looks like it's been updated so
4962          * that any holes get logged if we fsync.
4963          */
4964         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4965                 BTRFS_I(inode)->last_trans = fs_info->generation;
4966                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4967                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4968                 return 0;
4969         }
4970 
4971         /*
4972          * 1 - for the one we're dropping
4973          * 1 - for the one we're adding
4974          * 1 - for updating the inode.
4975          */
4976         trans = btrfs_start_transaction(root, 3);
4977         if (IS_ERR(trans))
4978                 return PTR_ERR(trans);
4979 
4980         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4981         if (ret) {
4982                 btrfs_abort_transaction(trans, ret);
4983                 btrfs_end_transaction(trans);
4984                 return ret;
4985         }
4986 
4987         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
4988                         offset, 0, 0, len, 0, len, 0, 0, 0);
4989         if (ret)
4990                 btrfs_abort_transaction(trans, ret);
4991         else
4992                 btrfs_update_inode(trans, root, inode);
4993         btrfs_end_transaction(trans);
4994         return ret;
4995 }
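
     /*
      * Note on the insertion above: passing 0 for both the disk bytenr and
      * the disk byte count is the on-disk representation of an explicit
      * hole, a regular file extent item that points at nothing.  With the
      * NO_HOLES feature such items are omitted entirely and readers infer
      * holes from the gaps between extents, which is why that case only
      * updates the in-memory last_trans/last_sub_trans/last_log_commit
      * markers so the hole still gets logged on fsync.
      */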
4996 
4997 /*
4998  * This function puts in dummy file extents for the area we're creating a hole
4999  * for.  So if we are truncating this file to a larger size we need to insert
5000  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
5001  * for the range between oldsize and size.
5002  */
5003 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
5004 {
5005         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5006         struct btrfs_root *root = BTRFS_I(inode)->root;
5007         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5008         struct extent_map *em = NULL;
5009         struct extent_state *cached_state = NULL;
5010         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5011         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5012         u64 block_end = ALIGN(size, fs_info->sectorsize);
5013         u64 last_byte;
5014         u64 cur_offset;
5015         u64 hole_size;
5016         int err = 0;
5017 
5018         /*
5019          * If our size started in the middle of a block we need to zero out the
5020          * rest of the block before we expand the i_size, otherwise we could
5021          * expose stale data.
5022          */
5023         err = btrfs_truncate_block(inode, oldsize, 0, 0);
5024         if (err)
5025                 return err;
5026 
5027         if (size <= hole_start)
5028                 return 0;
5029 
5030         while (1) {
5031                 struct btrfs_ordered_extent *ordered;
5032 
5033                 lock_extent_bits(io_tree, hole_start, block_end - 1,
5034                                  &cached_state);
5035                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
5036                                                      block_end - hole_start);
5037                 if (!ordered)
5038                         break;
5039                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
5040                                      &cached_state);
5041                 btrfs_start_ordered_extent(inode, ordered, 1);
5042                 btrfs_put_ordered_extent(ordered);
5043         }
5044 
5045         cur_offset = hole_start;
5046         while (1) {
5047                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
5048                                 block_end - cur_offset, 0);
5049                 if (IS_ERR(em)) {
5050                         err = PTR_ERR(em);
5051                         em = NULL;
5052                         break;
5053                 }
5054                 last_byte = min(extent_map_end(em), block_end);
5055                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5056                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5057                         struct extent_map *hole_em;
5058                         hole_size = last_byte - cur_offset;
5059 
5060                         err = maybe_insert_hole(root, inode, cur_offset,
5061                                                 hole_size);
5062                         if (err)
5063                                 break;
5064                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
5065                                                 cur_offset + hole_size - 1, 0);
5066                         hole_em = alloc_extent_map();
5067                         if (!hole_em) {
5068                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5069                                         &BTRFS_I(inode)->runtime_flags);
5070                                 goto next;
5071                         }
5072                         hole_em->start = cur_offset;
5073                         hole_em->len = hole_size;
5074                         hole_em->orig_start = cur_offset;
5075 
5076                         hole_em->block_start = EXTENT_MAP_HOLE;
5077                         hole_em->block_len = 0;
5078                         hole_em->orig_block_len = 0;
5079                         hole_em->ram_bytes = hole_size;
5080                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
5081                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
5082                         hole_em->generation = fs_info->generation;
5083 
5084                         while (1) {
5085                                 write_lock(&em_tree->lock);
5086                                 err = add_extent_mapping(em_tree, hole_em, 1);
5087                                 write_unlock(&em_tree->lock);
5088                                 if (err != -EEXIST)
5089                                         break;
5090                                 btrfs_drop_extent_cache(BTRFS_I(inode),
5091                                                         cur_offset,
5092                                                         cur_offset +
5093                                                         hole_size - 1, 0);
5094                         }
5095                         free_extent_map(hole_em);
5096                 }
5097 next:
5098                 free_extent_map(em);
5099                 em = NULL;
5100                 cur_offset = last_byte;
5101                 if (cur_offset >= block_end)
5102                         break;
5103         }
5104         free_extent_map(em);
5105         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5106         return err;
5107 }
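
     /*
      * Note on the expansion loop above: every gap that is not covered by
      * an existing extent or preallocation gets an on-disk hole extent via
      * maybe_insert_hole() plus a matching EXTENT_MAP_HOLE extent map
      * stamped with the current generation, so that a later fsync can log
      * the hole.  If allocating the extent map fails we do not fail the
      * expand; we set BTRFS_INODE_NEEDS_FULL_SYNC and let the next fsync
      * fall back to a full logging pass.
      */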
5108 
5109 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5110 {
5111         struct btrfs_root *root = BTRFS_I(inode)->root;
5112         struct btrfs_trans_handle *trans;
5113         loff_t oldsize = i_size_read(inode);
5114         loff_t newsize = attr->ia_size;
5115         int mask = attr->ia_valid;
5116         int ret;
5117 
5118         /*
5119          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5120          * special case where we need to update the times despite not having
5121          * these flags set.  For all other operations the VFS sets these flags
5122          * explicitly if it wants a timestamp update.
5123          */
5124         if (newsize != oldsize) {
5125                 inode_inc_iversion(inode);
5126                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5127                         inode->i_ctime = inode->i_mtime =
5128                                 current_time(inode);
5129         }
5130 
5131         if (newsize > oldsize) {
5132                 /*
5133                  * Don't do an expanding truncate while snapshotting is ongoing.
5134                  * This is to ensure the snapshot captures a fully consistent
5135                  * state of this file - if the snapshot captures this expanding
5136                  * truncation, it must capture all writes that happened before
5137                  * this truncation.
5138                  */
5139                 btrfs_wait_for_snapshot_creation(root);
5140                 ret = btrfs_cont_expand(inode, oldsize, newsize);
5141                 if (ret) {
5142                         btrfs_end_write_no_snapshotting(root);
5143                         return ret;
5144                 }
5145 
5146                 trans = btrfs_start_transaction(root, 1);
5147                 if (IS_ERR(trans)) {
5148                         btrfs_end_write_no_snapshotting(root);
5149                         return PTR_ERR(trans);
5150                 }
5151 
5152                 i_size_write(inode, newsize);
5153                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5154                 pagecache_isize_extended(inode, oldsize, newsize);
5155                 ret = btrfs_update_inode(trans, root, inode);
5156                 btrfs_end_write_no_snapshotting(root);
5157                 btrfs_end_transaction(trans);
5158         } else {
5159 
5160                 /*
5161                  * We're truncating a file that used to have good data down to
5162                  * zero. Make sure it gets into the ordered flush list so that
5163                  * any new writes get down to disk quickly.
5164                  */
5165                 if (newsize == 0)
5166                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5167                                 &BTRFS_I(inode)->runtime_flags);
5168 
5169                 truncate_setsize(inode, newsize);
5170 
5171         /* Disable unlocked read DIO to avoid an endless truncate */
5172                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5173                 inode_dio_wait(inode);
5174                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5175 
5176                 ret = btrfs_truncate(inode, newsize == oldsize);
5177                 if (ret && inode->i_nlink) {
5178                         int err;
5179 
5180                         /*
5181                          * Truncate failed, so fix up the in-memory size. We
5182                          * adjusted disk_i_size down as we removed extents, so
5183                          * wait for disk_i_size to be stable and then update the
5184                          * in-memory size to match.
5185                          */
5186                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5187                         if (err)
5188                                 return err;
5189                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5190                 }
5191         }
5192 
5193         return ret;
5194 }
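/*
 * A minimal userspace sketch (not part of this file; the path is a
 * placeholder) of the two directions btrfs_setsize() handles above:
 * growing goes through btrfs_cont_expand() and an i_size update,
 * shrinking through truncate_setsize() and btrfs_truncate():
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		ftruncate(fd, 1 << 20);	// grow: expanding truncate path
 *		ftruncate(fd, 0);	// shrink: regular truncate path
 *		return close(fd);
 *	}
 */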
5195 
5196 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5197 {
5198         struct inode *inode = d_inode(dentry);
5199         struct btrfs_root *root = BTRFS_I(inode)->root;
5200         int err;
5201 
5202         if (btrfs_root_readonly(root))
5203                 return -EROFS;
5204 
5205         err = setattr_prepare(dentry, attr);
5206         if (err)
5207                 return err;
5208 
5209         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5210                 err = btrfs_setsize(inode, attr);
5211                 if (err)
5212                         return err;
5213         }
5214 
5215         if (attr->ia_valid) {
5216                 setattr_copy(inode, attr);
5217                 inode_inc_iversion(inode);
5218                 err = btrfs_dirty_inode(inode);
5219 
5220                 if (!err && attr->ia_valid & ATTR_MODE)
5221                         err = posix_acl_chmod(inode, inode->i_mode);
5222         }
5223 
5224         return err;
5225 }
5226 
5227 /*
5228  * While truncating the inode pages during eviction, we get the VFS calling
5229  * btrfs_invalidatepage() against each page of the inode. This is slow because
5230  * the calls to btrfs_invalidatepage() result in a huge number of calls to
5231  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5232  * extent_state structures over and over, wasting lots of time.
5233  *
5234  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5235  * those expensive operations on a per page basis and do only the ordered io
5236  * finishing, while we release here the extent_map and extent_state structures,
5237  * without the excessive merging and splitting.
5238  */
5239 static void evict_inode_truncate_pages(struct inode *inode)
5240 {
5241         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5242         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5243         struct rb_node *node;
5244 
5245         ASSERT(inode->i_state & I_FREEING);
5246         truncate_inode_pages_final(&inode->i_data);
5247 
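        /*
         * Drop every cached extent map in one pass under the tree lock,
         * clearing the PINNED and LOGGING bits first since the inode is
         * being freed and the maps must go away regardless; back off the
         * lock periodically so we don't hog the CPU.
         */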
5248         write_lock(&map_tree->lock);
5249         while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
5250                 struct extent_map *em;
5251 
5252                 node = rb_first_cached(&map_tree->map);
5253                 em = rb_entry(node, struct extent_map, rb_node);
5254                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5255                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5256                 remove_extent_mapping(map_tree, em);
5257                 free_extent_map(em);
5258                 if (need_resched()) {
5259                         write_unlock(&map_tree->lock);
5260                         cond_resched();
5261                         write_lock(&map_tree->lock);
5262                 }
5263         }
5264         write_unlock(&map_tree->lock);
5265 
5266         /*
5267          * Keep looping until we have no more ranges in the io tree.
5268          * We can have ongoing bios started by readpages (called from readahead)
5269          * whose endio callback (extent_io.c:end_bio_extent_readpage) is still
5270          * in progress (it has unlocked the pages in the bio but has not yet
5271          * unlocked the ranges in the io tree). This means some ranges can still
5272          * be locked while eviction has already started, because the bios are
5273          * submitted and executed by separate tasks (work queue kthreads)
5274          * without taking inode references (inode->i_count) that would be
5275          * dropped in the endio callback of each bio.
5276          * Therefore here we effectively end up waiting for those bios, and for
5277          * anyone else holding locked ranges without having bumped the inode's
5278          * reference count - if we don't, by the time they access the inode's
5279          * io_tree to unlock a range it may be too late, leading to a
5280          * use-after-free.
5281          */
5282         spin_lock(&io_tree->lock);
5283         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5284                 struct extent_state *state;
5285                 struct extent_state *cached_state = NULL;
5286                 u64 start;
5287                 u64 end;
5288                 unsigned state_flags;
5289 
5290                 node = rb_first(&io_tree->state);
5291                 state = rb_entry(node, struct extent_state, rb_node);
5292                 start = state->start;
5293                 end = state->end;
5294                 state_flags = state->state;
5295                 spin_unlock(&io_tree->lock);
5296 
5297                 lock_extent_bits(io_tree, start, end, &cached_state);
5298 
5299                 /*
5300                  * If the range still has the DELALLOC flag, the extent never
5301                  * reached disk and its reserved space won't be freed by the
5302                  * delayed_ref machinery, so we need to free it here.
5303                  * (Refer to the comment in btrfs_invalidatepage, case 2)
5304                  *
5305                  * Note: end is the bytenr of the last byte, hence the + 1.
5306                  */
5307                 if (state_flags & EXTENT_DELALLOC)
5308                         btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5309 
5310                 clear_extent_bit(io_tree, start, end,
5311                                  EXTENT_LOCKED | EXTENT_DIRTY |
5312                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5313                                  EXTENT_DEFRAG, 1, 1, &cached_state);
5314 
5315                 cond_resched();
5316                 spin_lock(&io_tree->lock);
5317         }
5318         spin_unlock(&io_tree->lock);
5319 }
5320 
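/*
 * Make sure the temporary block reserve backing an inode eviction has enough
 * space before handing back a joined transaction: refill from the flushing
 * machinery, fall back to stealing from the global reserve, and as a last
 * resort commit the running transaction and retry.
 */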
5321 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5322                                                         struct btrfs_block_rsv *rsv)
5323 {
5324         struct btrfs_fs_info *fs_info = root->fs_info;
5325         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5326         int failures = 0;
5327 
5328         for (;;) {
5329                 struct btrfs_trans_handle *trans;
5330                 int ret;
5331 
5332                 ret = btrfs_block_rsv_refill(root, rsv, rsv->size,
5333                                              BTRFS_RESERVE_FLUSH_LIMIT);
5334 
5335                 if (ret && ++failures > 2) {
5336                         btrfs_warn(fs_info,
5337                                    "could not allocate space for a delete; will truncate on mount");
5338                         return ERR_PTR(-ENOSPC);
5339                 }
5340 
5341                 trans = btrfs_join_transaction(root);
5342                 if (IS_ERR(trans) || !ret)
5343                         return trans;
5344 
5345                 /*
5346                  * Try to steal from the global reserve if there is space for
5347                  * it.
5348                  */
5349                 if (!btrfs_check_space_for_delayed_refs(trans) &&
5350                     !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
5351                         return trans;
5352 
5353                 /* If not, commit and try again. */
5354                 ret = btrfs_commit_transaction(trans);
5355                 if (ret)
5356                         return ERR_PTR(ret);
5357         }
5358 }
5359 
5360 void btrfs_evict_inode(struct inode *inode)
5361 {
5362         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5363         struct btrfs_trans_handle *trans;
5364         struct btrfs_root *root = BTRFS_I(inode)->root;
5365         struct btrfs_block_rsv *rsv;
5366         int ret;
5367 
5368         trace_btrfs_inode_evict(inode);
5369 
5370         if (!root) {
5371                 clear_inode(inode);
5372                 return;
5373         }
5374 
5375         evict_inode_truncate_pages(inode);
5376 
5377         if (inode->i_nlink &&
5378             ((btrfs_root_refs(&root->root_item) != 0 &&
5379               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5380              btrfs_is_free_space_inode(BTRFS_I(inode))))
5381                 goto no_delete;
5382 
5383         if (is_bad_inode(inode))
5384                 goto no_delete;
5385 
5386         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5387 
5388         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5389                 goto no_delete;
5390 
5391         if (inode->i_nlink > 0) {
5392                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5393                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5394                 goto no_delete;
5395         }
5396 
5397         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5398         if (ret)
5399                 goto no_delete;
5400 
5401         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5402         if (!rsv)
5403                 goto no_delete;
5404         rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5405         rsv->failfast = 1;
5406 
5407         btrfs_i_size_write(BTRFS_I(inode), 0);
5408 
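        /*
         * Delete the inode items in chunks: each pass takes a freshly
         * refilled reservation, and -ENOSPC or -EAGAIN from
         * btrfs_truncate_inode_items() simply means "go around again".
         */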
5409         while (1) {
5410                 trans = evict_refill_and_join(root, rsv);
5411                 if (IS_ERR(trans))
5412                         goto free_rsv;
5413 
5414                 trans->block_rsv = rsv;
5415 
5416                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5417                 trans->block_rsv = &fs_info->trans_block_rsv;
5418                 btrfs_end_transaction(trans);
5419                 btrfs_btree_balance_dirty(fs_info);
5420                 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5421                         goto free_rsv;
5422                 else if (!ret)
5423                         break;
5424         }
5425 
5426         /*
5427          * Errors here aren't a big deal; they just mean we leave orphan items in
5428          * the tree. They will be cleaned up on the next mount. If the inode
5429          * number gets reused, cleanup deletes the orphan item without doing
5430          * anything, and unlink reuses the existing orphan item.
5431          *
5432          * If it turns out that we are dropping too many of these, we might want
5433          * to add a mechanism for retrying these after a commit.
5434          */
5435         trans = evict_refill_and_join(root, rsv);
5436         if (!IS_ERR(trans)) {
5437                 trans->block_rsv = rsv;
5438                 btrfs_orphan_del(trans, BTRFS_I(inode));
5439                 trans->block_rsv = &fs_info->trans_block_rsv;
5440                 btrfs_end_transaction(trans);
5441         }
5442 
5443         if (!(root == fs_info->tree_root ||
5444               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5445                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5446 
5447 free_rsv:
5448         btrfs_free_block_rsv(fs_info, rsv);
5449 no_delete:
5450         /*
5451          * If we didn't successfully delete, the orphan item will still be in
5452          * the tree and we'll retry on the next mount. Again, we might also want
5453          * to retry these periodically in the future.
5454          */
5455         btrfs_remove_delayed_node(BTRFS_I(inode));
5456         clear_inode(inode);
5457 }
5458 
5459 /*
5460  * This returns the key found in the dir entry in the location pointer.
5461  * If no dir entry was found, it returns -ENOENT.
5462  * If a corrupted location is found in the dir entry, it returns -EUCLEAN.
5463  */
5464 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5465                                struct btrfs_key *location)
5466 {
5467         const char *name = dentry->d_name.name;
5468         int namelen = dentry->d_name.len;
5469         struct btrfs_dir_item *di;
5470         struct btrfs_path *path;
5471         struct btrfs_root *root = BTRFS_I(dir)->root;
5472         int ret = 0;
5473 
5474         path = btrfs_alloc_path();
5475         if (!path)
5476                 return -ENOMEM;
5477 
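        /*
         * A NULL return means no matching dir item exists; an ERR_PTR means
         * the lookup itself failed.
         */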
5478         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5479                         name, namelen, 0);
5480         if (IS_ERR_OR_NULL(di)) {
5481                 ret = di ? PTR_ERR(di) : -ENOENT;
5482                 goto out;
5483         }
5484 
5485         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5486         if (location->type != BTRFS_INODE_ITEM_KEY &&
5487             location->type != BTRFS_ROOT_ITEM_KEY) {
5488                 ret = -EUCLEAN;
5489                 btrfs_warn(root->fs_info,
5490 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5491                            __func__, name, btrfs_ino(BTRFS_I(dir)),
5492                            location->objectid, location->type, location->offset);
5493         }
5494 out:
5495         btrfs_free_path(path);
5496         return ret;
5497 }
5498 
5499 /*
5500  * When we hit a tree root in a directory, the btrfs part of the inode
5501  * needs to be changed to reflect the root directory of the tree root.  This
5502  * is kind of like crossing a mount point.
5503  */
5504 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5505                                     struct inode *dir,
5506                                     struct dentry *dentry,
5507                                     struct btrfs_key *location,
5508                                     struct btrfs_root **sub_root)
5509 {
5510         struct btrfs_path *path;
5511         struct btrfs_root *new_root;
5512         struct btrfs_root_ref *ref;
5513         struct extent_buffer *leaf;
5514         struct btrfs_key key;
5515         int ret;
5516         int err = 0;
5517 
5518         path = btrfs_alloc_path();
5519         if (!path) {
5520                 err = -ENOMEM;
5521                 goto out;
5522         }
5523 
5524         err = -ENOENT;
5525         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5526         key.type = BTRFS_ROOT_REF_KEY;
5527