/* Linux/fs/btrfs/inode.c */

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"

struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};
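
/*
 * Illustrative note (not in the original file): the table above maps the
 * S_IFMT bits of an inode mode to the on-disk directory-entry type byte.
 * A minimal sketch of a lookup, using only the names defined above:
 *
 *      umode_t mode = S_IFDIR | 0755;
 *      u8 ft = btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *      // ft == BTRFS_FT_DIR
 */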

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
                                           u64 len, u64 orig_start,
                                           u64 block_start, u64 block_len,
                                           u64 orig_block_len, u64 ram_bytes,
                                           int type);

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(inode);
                key.offset = start;
                btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}
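
/*
 * Illustrative sketch (not in the original file): for an uncompressed
 * inline extent the data is copied straight from the page cache into the
 * leaf, so a small file living at offset 0 ends up entirely inside the
 * btree item.  The argument values here are hypothetical; real callers
 * go through cow_file_range_inline() below, which validates the size
 * limits first:
 *
 *      insert_inline_extent(trans, path, 0, root, inode,
 *                           0,                    // start
 *                           100,                  // size
 *                           0,                    // compressed_size
 *                           BTRFS_COMPRESS_NONE, NULL);
 */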


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end >= PAGE_CACHE_SIZE ||
            data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}
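
/*
 * Illustrative note (not in the original file): the bail-out test above
 * means an inline extent is only attempted when the write starts at
 * offset 0, covers the whole tail of the file, and the (possibly
 * compressed) payload fits both the leaf limit and the max_inline mount
 * option.  For example, with a 4K sectorsize, 3000 bytes written from
 * offset 0 are a candidate, while the same 3000 bytes written at offset
 * 8192 fail the start > 0 check: the function returns 1 and the caller
 * falls back to a regular allocated extent.
 */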

struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}
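
/*
 * Illustrative note (not in the original file): async_cow describes one
 * delalloc range handed to the worker threads, and cow->extents collects
 * the async_extent records that compress_file_range() produces for it.
 * submit_compressed_extents() later drains that list in FIFO order, so
 * on-disk allocation happens in the order the sub-ranges were queued.
 */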

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;

        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /* we want to make sure that the amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
            (btrfs_test_opt(root, COMPRESS) ||
             (BTRFS_I(inode)->force_compress) ||
             (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;

                        /*
                         * inline extent creation worked or returned an error;
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags, PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     PAGE_END_WRITEBACK);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret,
                                 compress_type);

                if (start + num_bytes < end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                if (redirty)
                        extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
        }

out:
        return ret;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);

        goto out;
}
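
/*
 * Illustrative note (not in the original file): the "is compression a
 * win" test above compares page- and block-aligned sizes, so a marginal
 * saving can still lose.  A hypothetical 4K-page example:
 *
 *      total_in         = 16384   // four pages went in
 *      total_compressed = 15000   // rounds up to 16384 on disk
 *      // 16384 >= 16384, so will_compress is cleared, the range is
 *      // written uncompressed, and the inode may be flagged
 *      // BTRFS_INODE_NOCOMPRESS to skip future attempts.
 */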

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

        if (list_empty(&async_cow->extents))
                return 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1);
                if (ret) {
                        int i;

                        for (i = 0; i < async_extent->nr_pages; i++) {
                                WARN_ON(async_extent->pages[i]->mapping);
                                page_cache_release(async_extent->pages[i]);
                        }
                        kfree(async_extent->pages);
                        async_extent->nr_pages = 0;
                        async_extent->pages = NULL;

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fallback to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }

                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_free_reserve;
                }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                if (ret)
                        goto out_free_reserve;

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret)
                        goto out_free_reserve;

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                if (ret)
                        goto out;
                cond_resched();
        }
        ret = 0;
out:
        return ret;
out_free_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        kfree(async_extent);
        goto again;
}
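
/*
 * Illustrative note (not in the original file): the ENOSPC path above is
 * a fallback, not a failure.  When a contiguous compressed extent cannot
 * be reserved, the compressed pages are freed, async_extent->pages is
 * set to NULL, and the "goto retry" re-enters the !pages branch, where
 * cow_file_range() writes the range uncompressed and may split it into
 * several smaller on-disk extents.
 */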

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}
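
/*
 * Illustrative note (not in the original file): the hint is purely an
 * optimization.  If a nearby cached mapping has a real disk address, new
 * allocations are steered next to it to keep file data contiguous; if
 * nothing usable is cached, the hint stays 0 and the allocator picks
 * freely.  A hypothetical caller:
 *
 *      u64 hint = get_extent_allocation_hint(inode, start, num_bytes);
 *      // hint == 0 means "no preference"
 */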

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        if (btrfs_is_free_space_inode(inode)) {
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;

        /* if this is a small write inside eof, kick off defrag */
        if (num_bytes < 64 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0, 0,
                                            NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1);
                if (ret < 0)
                        goto out_unlock;

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_reserve;
                }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }
                if (ret)
                        goto out_reserve;

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_reserve;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_reserve;
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}
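
/*
 * Illustrative note (not in the original file): the loop above asks the
 * allocator for the whole remaining range each pass but accepts whatever
 * contiguous chunk comes back (ins.offset), so fragmented free space
 * simply yields several smaller extents.  A hypothetical walk for a
 * 1 MiB delalloc range:
 *
 *      pass 1: ask for 1 MiB, get 640K  -> extent #1, 384K remaining
 *      pass 2: ask for 384K,  get 384K  -> extent #2, done
 */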

/*
 * work queue callback to start compression on a file and its pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                async_cow->work.func = async_cow_start;
                async_cow->work.ordered_func = async_cow_submit;
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_worker(&root->fs_info->delalloc_workers,
                                   &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}
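
/*
 * Illustrative note (not in the original file): each 512K chunk above is
 * driven through the three callbacks set on async_cow->work:
 *
 *      work.func         = async_cow_start   // parallel: compress pages
 *      work.ordered_func = async_cow_submit  // serialized: allocate + submit
 *      work.ordered_free = async_cow_free    // serialized: drop refs, kfree
 *
 * func may run on any worker cpu, while the ordered_* callbacks run in
 * queue order, which is what keeps on-disk submission ordered even when
 * compression finishes out of order.
 */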

static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}
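
/*
 * Illustrative note (not in the original file): run_delalloc_nocow()
 * below uses this helper to force COW whenever any checksum already
 * covers the candidate byte range.  Overwriting those blocks in place
 * would leave stale csums behind, so the invariant is that csums for a
 * given extent are either all valid or entirely absent, never a mix.
 */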
1141 
1142 /*
1143  * when nowcow writeback call back.  This checks for snapshots or COW copies
1144  * of the extents that exist in the file, and COWs the file as required.
1145  *
1146  * If no cow copies or snapshots exist, we write directly to the existing
1147  * blocks on disk
1148  */
1149 static noinline int run_delalloc_nocow(struct inode *inode,
1150                                        struct page *locked_page,
1151                               u64 start, u64 end, int *page_started, int force,
1152                               unsigned long *nr_written)
1153 {
1154         struct btrfs_root *root = BTRFS_I(inode)->root;
1155         struct btrfs_trans_handle *trans;
1156         struct extent_buffer *leaf;
1157         struct btrfs_path *path;
1158         struct btrfs_file_extent_item *fi;
1159         struct btrfs_key found_key;
1160         u64 cow_start;
1161         u64 cur_offset;
1162         u64 extent_end;
1163         u64 extent_offset;
1164         u64 disk_bytenr;
1165         u64 num_bytes;
1166         u64 disk_num_bytes;
1167         u64 ram_bytes;
1168         int extent_type;
1169         int ret, err;
1170         int type;
1171         int nocow;
1172         int check_prev = 1;
1173         bool nolock;
1174         u64 ino = btrfs_ino(inode);
1175 
1176         path = btrfs_alloc_path();
1177         if (!path) {
1178                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1179                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1180                                              EXTENT_DO_ACCOUNTING |
1181                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1182                                              PAGE_CLEAR_DIRTY |
1183                                              PAGE_SET_WRITEBACK |
1184                                              PAGE_END_WRITEBACK);
1185                 return -ENOMEM;
1186         }
1187 
1188         nolock = btrfs_is_free_space_inode(inode);
1189 
1190         if (nolock)
1191                 trans = btrfs_join_transaction_nolock(root);
1192         else
1193                 trans = btrfs_join_transaction(root);
1194 
1195         if (IS_ERR(trans)) {
1196                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1197                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1198                                              EXTENT_DO_ACCOUNTING |
1199                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1200                                              PAGE_CLEAR_DIRTY |
1201                                              PAGE_SET_WRITEBACK |
1202                                              PAGE_END_WRITEBACK);
1203                 btrfs_free_path(path);
1204                 return PTR_ERR(trans);
1205         }
1206 
1207         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1208 
1209         cow_start = (u64)-1;
1210         cur_offset = start;
1211         while (1) {
1212                 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1213                                                cur_offset, 0);
1214                 if (ret < 0)
1215                         goto error;
1216                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1217                         leaf = path->nodes[0];
1218                         btrfs_item_key_to_cpu(leaf, &found_key,
1219                                               path->slots[0] - 1);
1220                         if (found_key.objectid == ino &&
1221                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1222                                 path->slots[0]--;
1223                 }
1224                 check_prev = 0;
1225 next_slot:
1226                 leaf = path->nodes[0];
1227                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1228                         ret = btrfs_next_leaf(root, path);
1229                         if (ret < 0)
1230                                 goto error;
1231                         if (ret > 0)
1232                                 break;
1233                         leaf = path->nodes[0];
1234                 }
1235 
1236                 nocow = 0;
1237                 disk_bytenr = 0;
1238                 num_bytes = 0;
1239                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1240 
1241                 if (found_key.objectid > ino)
1242                         break;
1243                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1244                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1245                         path->slots[0]++;
1246                         goto next_slot;
1247                 }
1248                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1249                     found_key.offset > end)
1250                         break;
1251 
1252                 if (found_key.offset > cur_offset) {
1253                         extent_end = found_key.offset;
1254                         extent_type = 0;
1255                         goto out_check;
1256                 }
1257 
1258                 fi = btrfs_item_ptr(leaf, path->slots[0],
1259                                     struct btrfs_file_extent_item);
1260                 extent_type = btrfs_file_extent_type(leaf, fi);
1261 
1262                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1263                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1264                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1265                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1266                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1267                         extent_end = found_key.offset +
1268                                 btrfs_file_extent_num_bytes(leaf, fi);
1269                         disk_num_bytes =
1270                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1271                         if (extent_end <= start) {
1272                                 path->slots[0]++;
1273                                 goto next_slot;
1274                         }
1275                         if (disk_bytenr == 0)
1276                                 goto out_check;
1277                         if (btrfs_file_extent_compression(leaf, fi) ||
1278                             btrfs_file_extent_encryption(leaf, fi) ||
1279                             btrfs_file_extent_other_encoding(leaf, fi))
1280                                 goto out_check;
1281                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1282                                 goto out_check;
1283                         if (btrfs_extent_readonly(root, disk_bytenr))
1284                                 goto out_check;
1285                         if (btrfs_cross_ref_exist(trans, root, ino,
1286                                                   found_key.offset -
1287                                                   extent_offset, disk_bytenr))
1288                                 goto out_check;
1289                         disk_bytenr += extent_offset;
1290                         disk_bytenr += cur_offset - found_key.offset;
1291                         num_bytes = min(end + 1, extent_end) - cur_offset;
1292                         /*
1293                          * force cow if a csum exists in the range.
1294                          * this ensures that csums for a given extent are
1295                          * either valid or do not exist.
1296                          */
1297                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1298                                 goto out_check;
1299                         nocow = 1;
1300                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1301                         extent_end = found_key.offset +
1302                                 btrfs_file_extent_inline_len(leaf,
1303                                                      path->slots[0], fi);
1304                         extent_end = ALIGN(extent_end, root->sectorsize);
1305                 } else {
1306                         BUG_ON(1);
1307                 }
1308 out_check:
1309                 if (extent_end <= start) {
1310                         path->slots[0]++;
1311                         goto next_slot;
1312                 }
1313                 if (!nocow) {
1314                         if (cow_start == (u64)-1)
1315                                 cow_start = cur_offset;
1316                         cur_offset = extent_end;
1317                         if (cur_offset > end)
1318                                 break;
1319                         path->slots[0]++;
1320                         goto next_slot;
1321                 }
1322 
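                     /*
                      * we found an extent we can nocow into: first COW
                      * whatever accumulated before it in
                      * [cow_start, found_key.offset)
                      */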
1323                 btrfs_release_path(path);
1324                 if (cow_start != (u64)-1) {
1325                         ret = cow_file_range(inode, locked_page,
1326                                              cow_start, found_key.offset - 1,
1327                                              page_started, nr_written, 1);
1328                         if (ret)
1329                                 goto error;
1330                         cow_start = (u64)-1;
1331                 }
1332 
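                     /*
                      * writing into preallocated space: insert a pinned
                      * extent map for the range so the IO can be tracked
                      * until the ordered extent completes
                      */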
1333                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1334                         struct extent_map *em;
1335                         struct extent_map_tree *em_tree;
1336                         em_tree = &BTRFS_I(inode)->extent_tree;
1337                         em = alloc_extent_map();
1338                         BUG_ON(!em); /* -ENOMEM */
1339                         em->start = cur_offset;
1340                         em->orig_start = found_key.offset - extent_offset;
1341                         em->len = num_bytes;
1342                         em->block_len = num_bytes;
1343                         em->block_start = disk_bytenr;
1344                         em->orig_block_len = disk_num_bytes;
1345                         em->ram_bytes = ram_bytes;
1346                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1347                         em->mod_start = em->start;
1348                         em->mod_len = em->len;
1349                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1350                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1351                         em->generation = -1;
1352                         while (1) {
1353                                 write_lock(&em_tree->lock);
1354                                 ret = add_extent_mapping(em_tree, em, 1);
1355                                 write_unlock(&em_tree->lock);
1356                                 if (ret != -EEXIST) {
1357                                         free_extent_map(em);
1358                                         break;
1359                                 }
1360                                 btrfs_drop_extent_cache(inode, em->start,
1361                                                 em->start + em->len - 1, 0);
1362                         }
1363                         type = BTRFS_ORDERED_PREALLOC;
1364                 } else {
1365                         type = BTRFS_ORDERED_NOCOW;
1366                 }
1367 
1368                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1369                                                num_bytes, num_bytes, type);
1370                 BUG_ON(ret); /* -ENOMEM */
1371 
1372                 if (root->root_key.objectid ==
1373                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1374                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1375                                                       num_bytes);
1376                         if (ret)
1377                                 goto error;
1378                 }
1379 
1380                 extent_clear_unlock_delalloc(inode, cur_offset,
1381                                              cur_offset + num_bytes - 1,
1382                                              locked_page, EXTENT_LOCKED |
1383                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1384                                              PAGE_SET_PRIVATE2);
1385                 cur_offset = extent_end;
1386                 if (cur_offset > end)
1387                         break;
1388         }
1389         btrfs_release_path(path);
1390 
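             /*
              * no nocow candidate covered the tail of the range, so fall
              * back to normal COW for whatever is left up to 'end'
              */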
1391         if (cur_offset <= end && cow_start == (u64)-1) {
1392                 cow_start = cur_offset;
1393                 cur_offset = end;
1394         }
1395 
1396         if (cow_start != (u64)-1) {
1397                 ret = cow_file_range(inode, locked_page, cow_start, end,
1398                                      page_started, nr_written, 1);
1399                 if (ret)
1400                         goto error;
1401         }
1402 
1403 error:
1404         err = btrfs_end_transaction(trans, root);
1405         if (!ret)
1406                 ret = err;
1407 
1408         if (ret && cur_offset < end)
1409                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1410                                              locked_page, EXTENT_LOCKED |
1411                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1412                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1413                                              PAGE_CLEAR_DIRTY |
1414                                              PAGE_SET_WRITEBACK |
1415                                              PAGE_END_WRITEBACK);
1416         btrfs_free_path(path);
1417         return ret;
1418 }
1419 
1420 /*
1421  * extent_io.c callback to do delayed allocation processing
1422  */
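     /*
      * summary of the dispatch below: NODATACOW inodes take the nocow path
      * with force=1, PREALLOC inodes take it with force=0, plain writes go
      * through cow_file_range(), and anything that may compress is handed
      * to cow_file_range_async()
      */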
1423 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1424                               u64 start, u64 end, int *page_started,
1425                               unsigned long *nr_written)
1426 {
1427         int ret;
1428         struct btrfs_root *root = BTRFS_I(inode)->root;
1429 
1430         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
1431                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1432                                          page_started, 1, nr_written);
1433         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
1434                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1435                                          page_started, 0, nr_written);
1436         } else if (!btrfs_test_opt(root, COMPRESS) &&
1437                    !(BTRFS_I(inode)->force_compress) &&
1438                    !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
1439                 ret = cow_file_range(inode, locked_page, start, end,
1440                                       page_started, nr_written, 1);
1441         } else {
1442                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1443                         &BTRFS_I(inode)->runtime_flags);
1444                 ret = cow_file_range_async(inode, locked_page, start, end,
1445                                            page_started, nr_written);
1446         }
1447         return ret;
1448 }
1449 
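     /*
      * extent_io.c split_extent_hook: splitting a delalloc extent state in
      * two leaves us with one more outstanding extent to reserve metadata
      * for, so bump the count
      */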
1450 static void btrfs_split_extent_hook(struct inode *inode,
1451                                     struct extent_state *orig, u64 split)
1452 {
1453         /* not delalloc, ignore it */
1454         if (!(orig->state & EXTENT_DELALLOC))
1455                 return;
1456 
1457         spin_lock(&BTRFS_I(inode)->lock);
1458         BTRFS_I(inode)->outstanding_extents++;
1459         spin_unlock(&BTRFS_I(inode)->lock);
1460 }
1461 
1462 /*
1463  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1464  * extents so we can keep track of new extents that are just merged onto old
1465  * extents, such as when we are doing sequential writes, so we can properly
1466  * account for the metadata space we'll need.
1467  */
1468 static void btrfs_merge_extent_hook(struct inode *inode,
1469                                     struct extent_state *new,
1470                                     struct extent_state *other)
1471 {
1472         /* not delalloc, ignore it */
1473         if (!(other->state & EXTENT_DELALLOC))
1474                 return;
1475 
1476         spin_lock(&BTRFS_I(inode)->lock);
1477         BTRFS_I(inode)->outstanding_extents--;
1478         spin_unlock(&BTRFS_I(inode)->lock);
1479 }
1480 
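     /*
      * put the inode on its root's delalloc list, and put the root on the
      * fs-wide delalloc_roots list the first time, so flushers can find
      * inodes with pending delalloc work
      */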
1481 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1482                                       struct inode *inode)
1483 {
1484         spin_lock(&root->delalloc_lock);
1485         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1486                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1487                               &root->delalloc_inodes);
1488                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1489                         &BTRFS_I(inode)->runtime_flags);
1490                 root->nr_delalloc_inodes++;
1491                 if (root->nr_delalloc_inodes == 1) {
1492                         spin_lock(&root->fs_info->delalloc_root_lock);
1493                         BUG_ON(!list_empty(&root->delalloc_root));
1494                         list_add_tail(&root->delalloc_root,
1495                                       &root->fs_info->delalloc_roots);
1496                         spin_unlock(&root->fs_info->delalloc_root_lock);
1497                 }
1498         }
1499         spin_unlock(&root->delalloc_lock);
1500 }
1501 
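     /*
      * inverse of btrfs_add_delalloc_inodes: drop the inode from the
      * delalloc list, and the root too once its list goes empty
      */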
1502 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1503                                      struct inode *inode)
1504 {
1505         spin_lock(&root->delalloc_lock);
1506         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1507                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1508                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1509                           &BTRFS_I(inode)->runtime_flags);
1510                 root->nr_delalloc_inodes--;
1511                 if (!root->nr_delalloc_inodes) {
1512                         spin_lock(&root->fs_info->delalloc_root_lock);
1513                         BUG_ON(list_empty(&root->delalloc_root));
1514                         list_del_init(&root->delalloc_root);
1515                         spin_unlock(&root->fs_info->delalloc_root_lock);
1516                 }
1517         }
1518         spin_unlock(&root->delalloc_lock);
1519 }
1520 
1521 /*
1522  * extent_io.c set_bit_hook, used to track delayed allocation
1523  * bytes in this file, and to maintain the list of inodes that
1524  * have pending delalloc work to be done.
1525  */
1526 static void btrfs_set_bit_hook(struct inode *inode,
1527                                struct extent_state *state, unsigned long *bits)
1528 {
1529 
1530         /*
1531          * set_bit and clear bit hooks normally require _irqsave/restore
1532          * but in this case, we are only testing for the DELALLOC
1533          * bit, which is only set or cleared with irqs on
1534          */
1535         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1536                 struct btrfs_root *root = BTRFS_I(inode)->root;
1537                 u64 len = state->end + 1 - state->start;
1538                 bool do_list = !btrfs_is_free_space_inode(inode);
1539 
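                     /*
                      * the reservation path has already accounted one
                      * outstanding extent for the first delalloc bit set
                      * in a range; EXTENT_FIRST_DELALLOC tells us not to
                      * count it again here
                      */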
1540                 if (*bits & EXTENT_FIRST_DELALLOC) {
1541                         *bits &= ~EXTENT_FIRST_DELALLOC;
1542                 } else {
1543                         spin_lock(&BTRFS_I(inode)->lock);
1544                         BTRFS_I(inode)->outstanding_extents++;
1545                         spin_unlock(&BTRFS_I(inode)->lock);
1546                 }
1547 
1548                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1549                                      root->fs_info->delalloc_batch);
1550                 spin_lock(&BTRFS_I(inode)->lock);
1551                 BTRFS_I(inode)->delalloc_bytes += len;
1552                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1553                                          &BTRFS_I(inode)->runtime_flags))
1554                         btrfs_add_delalloc_inodes(root, inode);
1555                 spin_unlock(&BTRFS_I(inode)->lock);
1556         }
1557 }
1558 
1559 /*
1560  * extent_io.c clear_bit_hook, see set_bit_hook for why
1561  */
1562 static void btrfs_clear_bit_hook(struct inode *inode,
1563                                  struct extent_state *state,
1564                                  unsigned long *bits)
1565 {
1566         /*
1567          * set_bit and clear bit hooks normally require _irqsave/restore
1568          * but in this case, we are only testing for the DELALLOC
1569          * bit, which is only set or cleared with irqs on
1570          */
1571         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1572                 struct btrfs_root *root = BTRFS_I(inode)->root;
1573                 u64 len = state->end + 1 - state->start;
1574                 bool do_list = !btrfs_is_free_space_inode(inode);
1575 
1576                 if (*bits & EXTENT_FIRST_DELALLOC) {
1577                         *bits &= ~EXTENT_FIRST_DELALLOC;
1578                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1579                         spin_lock(&BTRFS_I(inode)->lock);
1580                         BTRFS_I(inode)->outstanding_extents--;
1581                         spin_unlock(&BTRFS_I(inode)->lock);
1582                 }
1583 
1584                 /*
1585                  * We don't reserve metadata space for space cache inodes so we
1586                  * don't need to call btrfs_delalloc_release_metadata if there is an
1587                  * error.
1588                  */
1589                 if (*bits & EXTENT_DO_ACCOUNTING &&
1590                     root != root->fs_info->tree_root)
1591                         btrfs_delalloc_release_metadata(inode, len);
1592 
1593                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1594                     && do_list && !(state->state & EXTENT_NORESERVE))
1595                         btrfs_free_reserved_data_space(inode, len);
1596 
1597                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1598                                      root->fs_info->delalloc_batch);
1599                 spin_lock(&BTRFS_I(inode)->lock);
1600                 BTRFS_I(inode)->delalloc_bytes -= len;
1601                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1602                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1603                              &BTRFS_I(inode)->runtime_flags))
1604                         btrfs_del_delalloc_inode(root, inode);
1605                 spin_unlock(&BTRFS_I(inode)->lock);
1606         }
1607 }
1608 
1609 /*
1610  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1611  * we don't create bios that span stripes or chunks
1612  */
1613 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1614                          size_t size, struct bio *bio,
1615                          unsigned long bio_flags)
1616 {
1617         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1618         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1619         u64 length = 0;
1620         u64 map_length;
1621         int ret;
1622 
1623         if (bio_flags & EXTENT_BIO_COMPRESSED)
1624                 return 0;
1625 
1626         length = bio->bi_iter.bi_size;
1627         map_length = length;
1628         ret = btrfs_map_block(root->fs_info, rw, logical,
1629                               &map_length, NULL, 0);
1630         /* Will always return 0 since we pass a NULL multi-bio pointer */
1631         BUG_ON(ret < 0);
1632         if (map_length < length + size)
1633                 return 1;
1634         return 0;
1635 }
1636 
1637 /*
1638  * in order to insert checksums into the metadata in large chunks,
1639  * we wait until bio submission time.   All the pages in the bio are
1640  * checksummed and sums are attached onto the ordered extent record.
1641  *
1642  * At IO completion time the csums attached on the ordered extent record
1643  * are inserted into the btree
1644  */
1645 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1646                                     struct bio *bio, int mirror_num,
1647                                     unsigned long bio_flags,
1648                                     u64 bio_offset)
1649 {
1650         struct btrfs_root *root = BTRFS_I(inode)->root;
1651         int ret = 0;
1652 
1653         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1654         BUG_ON(ret); /* -ENOMEM */
1655         return 0;
1656 }
1657 
1658 /*
1659  * second half of the async write submission: by the time this runs,
1660  * __btrfs_submit_bio_start has already attached the csums to the
1661  * ordered extent record, so all that is left is to map the bio to a
1662  * device and submit it.
1663  *
1664  * Any mapping failure is reported back through bio_endio().
1665  */
1666 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1667                           int mirror_num, unsigned long bio_flags,
1668                           u64 bio_offset)
1669 {
1670         struct btrfs_root *root = BTRFS_I(inode)->root;
1671         int ret;
1672 
1673         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1674         if (ret)
1675                 bio_endio(bio, ret);
1676         return ret;
1677 }
1678 
1679 /*
1680  * extent_io.c submission hook. This does the right thing for csum calculation
1681  * on write, or reading the csums from the tree before a read
1682  */
1683 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1684                           int mirror_num, unsigned long bio_flags,
1685                           u64 bio_offset)
1686 {
1687         struct btrfs_root *root = BTRFS_I(inode)->root;
1688         int ret = 0;
1689         int skip_sum;
1690         int metadata = 0;
1691         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1692 
1693         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1694 
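             /*
              * the free space inode's end_io work runs on its own
              * workqueue; metadata = 2 selects it in btrfs_bio_wq_end_io()
              */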
1695         if (btrfs_is_free_space_inode(inode))
1696                 metadata = 2;
1697 
1698         if (!(rw & REQ_WRITE)) {
1699                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1700                 if (ret)
1701                         goto out;
1702 
1703                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1704                         ret = btrfs_submit_compressed_read(inode, bio,
1705                                                            mirror_num,
1706                                                            bio_flags);
1707                         goto out;
1708                 } else if (!skip_sum) {
1709                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1710                         if (ret)
1711                                 goto out;
1712                 }
1713                 goto mapit;
1714         } else if (async && !skip_sum) {
1715                 /* csum items have already been cloned */
1716                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1717                         goto mapit;
1718                 /* we're doing a write, do the async checksumming */
1719                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1720                                    inode, rw, bio, mirror_num,
1721                                    bio_flags, bio_offset,
1722                                    __btrfs_submit_bio_start,
1723                                    __btrfs_submit_bio_done);
1724                 goto out;
1725         } else if (!skip_sum) {
1726                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1727                 if (ret)
1728                         goto out;
1729         }
1730 
1731 mapit:
1732         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1733 
1734 out:
1735         if (ret < 0)
1736                 bio_endio(bio, ret);
1737         return ret;
1738 }
1739 
1740 /*
1741  * given a list of ordered sums record them in the inode.  This happens
1742  * at IO completion time based on sums calculated at bio submission time.
1743  */
1744 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1745                              struct inode *inode, u64 file_offset,
1746                              struct list_head *list)
1747 {
1748         struct btrfs_ordered_sum *sum;
1749 
1750         list_for_each_entry(sum, list, list) {
1751                 trans->adding_csums = 1;
1752                 btrfs_csum_file_blocks(trans,
1753                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1754                 trans->adding_csums = 0;
1755         }
1756         return 0;
1757 }
1758 
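     /*
      * 'end' is the inclusive last byte of the range, so a page aligned
      * value here would be an off-by-one from the caller
      */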
1759 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1760                               struct extent_state **cached_state)
1761 {
1762         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1763         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1764                                    cached_state, GFP_NOFS);
1765 }
1766 
1767 /* see btrfs_writepage_start_hook for details on why this is required */
1768 struct btrfs_writepage_fixup {
1769         struct page *page;
1770         struct btrfs_work work;
1771 };
1772 
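     /*
      * worker for the fixup queued by btrfs_writepage_start_hook below:
      * wait out any ordered extent covering the page, reserve space and
      * re-dirty the page with proper delalloc accounting
      */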
1773 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1774 {
1775         struct btrfs_writepage_fixup *fixup;
1776         struct btrfs_ordered_extent *ordered;
1777         struct extent_state *cached_state = NULL;
1778         struct page *page;
1779         struct inode *inode;
1780         u64 page_start;
1781         u64 page_end;
1782         int ret;
1783 
1784         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1785         page = fixup->page;
1786 again:
1787         lock_page(page);
1788         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1789                 ClearPageChecked(page);
1790                 goto out_page;
1791         }
1792 
1793         inode = page->mapping->host;
1794         page_start = page_offset(page);
1795         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1796 
1797         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1798                          &cached_state);
1799 
1800         /* already ordered? We're done */
1801         if (PagePrivate2(page))
1802                 goto out;
1803 
1804         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1805         if (ordered) {
1806                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1807                                      page_end, &cached_state, GFP_NOFS);
1808                 unlock_page(page);
1809                 btrfs_start_ordered_extent(inode, ordered, 1);
1810                 btrfs_put_ordered_extent(ordered);
1811                 goto again;
1812         }
1813 
1814         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1815         if (ret) {
1816                 mapping_set_error(page->mapping, ret);
1817                 end_extent_writepage(page, ret, page_start, page_end);
1818                 ClearPageChecked(page);
1819                 goto out;
1820          }
1821 
1822         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1823         ClearPageChecked(page);
1824         set_page_dirty(page);
1825 out:
1826         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1827                              &cached_state, GFP_NOFS);
1828 out_page:
1829         unlock_page(page);
1830         page_cache_release(page);
1831         kfree(fixup);
1832 }
1833 
1834 /*
1835  * There are a few paths in the higher layers of the kernel that directly
1836  * set the page dirty bit without asking the filesystem if it is a
1837  * good idea.  This causes problems because we want to make sure COW
1838  * properly happens and the data=ordered rules are followed.
1839  *
1840  * In our case any range that doesn't have the ORDERED bit set
1841  * hasn't been properly setup for IO.  We kick off an async process
1842  * to fix it up.  The async helper will wait for ordered extents, set
1843  * the delalloc bit and make it safe to write the page.
1844  */
1845 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1846 {
1847         struct inode *inode = page->mapping->host;
1848         struct btrfs_writepage_fixup *fixup;
1849         struct btrfs_root *root = BTRFS_I(inode)->root;
1850 
1851         /* this page is properly in the ordered list */
1852         if (TestClearPagePrivate2(page))
1853                 return 0;
1854 
1855         if (PageChecked(page))
1856                 return -EAGAIN;
1857 
1858         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1859         if (!fixup)
1860                 return -EAGAIN;
1861 
1862         SetPageChecked(page);
1863         page_cache_get(page);
1864         fixup->work.func = btrfs_writepage_fixup_worker;
1865         fixup->page = page;
1866         btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1867         return -EBUSY;
1868 }
1869 
1870 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1871                                        struct inode *inode, u64 file_pos,
1872                                        u64 disk_bytenr, u64 disk_num_bytes,
1873                                        u64 num_bytes, u64 ram_bytes,
1874                                        u8 compression, u8 encryption,
1875                                        u16 other_encoding, int extent_type)
1876 {
1877         struct btrfs_root *root = BTRFS_I(inode)->root;
1878         struct btrfs_file_extent_item *fi;
1879         struct btrfs_path *path;
1880         struct extent_buffer *leaf;
1881         struct btrfs_key ins;
1882         int extent_inserted = 0;
1883         int ret;
1884 
1885         path = btrfs_alloc_path();
1886         if (!path)
1887                 return -ENOMEM;
1888 
1889         /*
1890          * we may be replacing one extent in the tree with another.
1891          * The new extent is pinned in the extent map, and we don't want
1892          * to drop it from the cache until it is completely in the btree.
1893          *
1894          * So, tell btrfs_drop_extents to leave this extent in the cache.
1895          * the caller is expected to unpin it and allow it to be merged
1896          * with the others.
1897          */
1898         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
1899                                    file_pos + num_bytes, NULL, 0,
1900                                    1, sizeof(*fi), &extent_inserted);
1901         if (ret)
1902                 goto out;
1903 
1904         if (!extent_inserted) {
1905                 ins.objectid = btrfs_ino(inode);
1906                 ins.offset = file_pos;
1907                 ins.type = BTRFS_EXTENT_DATA_KEY;
1908 
1909                 path->leave_spinning = 1;
1910                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1911                                               sizeof(*fi));
1912                 if (ret)
1913                         goto out;
1914         }
1915         leaf = path->nodes[0];
1916         fi = btrfs_item_ptr(leaf, path->slots[0],
1917                             struct btrfs_file_extent_item);
1918         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1919         btrfs_set_file_extent_type(leaf, fi, extent_type);
1920         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1921         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1922         btrfs_set_file_extent_offset(leaf, fi, 0);
1923         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1924         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1925         btrfs_set_file_extent_compression(leaf, fi, compression);
1926         btrfs_set_file_extent_encryption(leaf, fi, encryption);
1927         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1928 
1929         btrfs_mark_buffer_dirty(leaf);
1930         btrfs_release_path(path);
1931 
1932         inode_add_bytes(inode, num_bytes);
1933 
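             /*
              * now record the new data extent in the extent tree: reuse
              * 'ins' as the extent item key so the file backref can be
              * added against it
              */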
1934         ins.objectid = disk_bytenr;
1935         ins.offset = disk_num_bytes;
1936         ins.type = BTRFS_EXTENT_ITEM_KEY;
1937         ret = btrfs_alloc_reserved_file_extent(trans, root,
1938                                         root->root_key.objectid,
1939                                         btrfs_ino(inode), file_pos, &ins);
1940 out:
1941         btrfs_free_path(path);
1942 
1943         return ret;
1944 }
1945 
1946 /* snapshot-aware defrag: state for relinking old extent refs to the new extent */
1947 struct sa_defrag_extent_backref {
1948         struct rb_node node;
1949         struct old_sa_defrag_extent *old;
1950         u64 root_id;
1951         u64 inum;
1952         u64 file_pos;
1953         u64 extent_offset;
1954         u64 num_bytes;
1955         u64 generation;
1956 };
1957 
1958 struct old_sa_defrag_extent {
1959         struct list_head list;
1960         struct new_sa_defrag_extent *new;
1961 
1962         u64 extent_offset;
1963         u64 bytenr;
1964         u64 offset;
1965         u64 len;
1966         int count;
1967 };
1968 
1969 struct new_sa_defrag_extent {
1970         struct rb_root root;
1971         struct list_head head;
1972         struct btrfs_path *path;
1973         struct inode *inode;
1974         u64 file_pos;
1975         u64 len;
1976         u64 bytenr;
1977         u64 disk_len;
1978         u8 compress_type;
1979 };
1980 
1981 static int backref_comp(struct sa_defrag_extent_backref *b1,
1982                         struct sa_defrag_extent_backref *b2)
1983 {
1984         if (b1->root_id < b2->root_id)
1985                 return -1;
1986         else if (b1->root_id > b2->root_id)
1987                 return 1;
1988 
1989         if (b1->inum < b2->inum)
1990                 return -1;
1991         else if (b1->inum > b2->inum)
1992                 return 1;
1993 
1994         if (b1->file_pos < b2->file_pos)
1995                 return -1;
1996         else if (b1->file_pos > b2->file_pos)
1997                 return 1;
1998 
1999         /*
2000          * [------------------------------] ===> (a range of space)
2001          *     |<--->|   |<---->| =============> (fs/file tree A)
2002          * |<---------------------------->| ===> (fs/file tree B)
2003          *
2004          * A range of space can refer to two file extents in one tree while
2005          * referring to only one file extent in another tree.
2006          *
2007          * So we may process a disk offset more than once (two extents in A)
2008          * that lands in the same extent (one extent in B), and insert two
2009          * identical backrefs (both referring to the extent in B).
2010          */
2011         return 0;
2012 }
2013 
2014 static void backref_insert(struct rb_root *root,
2015                            struct sa_defrag_extent_backref *backref)
2016 {
2017         struct rb_node **p = &root->rb_node;
2018         struct rb_node *parent = NULL;
2019         struct sa_defrag_extent_backref *entry;
2020         int ret;
2021 
2022         while (*p) {
2023                 parent = *p;
2024                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2025 
2026                 ret = backref_comp(backref, entry);
2027                 if (ret < 0)
2028                         p = &(*p)->rb_left;
2029                 else
2030                         p = &(*p)->rb_right;
2031         }
2032 
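             /* backref_comp() == 0 walked right above, so duplicate backrefs coexist */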
2033         rb_link_node(&backref->node, parent, p);
2034         rb_insert_color(&backref->node, root);
2035 }
2036 
2037 /*
2038  * Note the backref might have changed; in that case we just return 0.
2039  */
2040 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2041                                        void *ctx)
2042 {
2043         struct btrfs_file_extent_item *extent;
2044         struct btrfs_fs_info *fs_info;
2045         struct old_sa_defrag_extent *old = ctx;
2046         struct new_sa_defrag_extent *new = old->new;
2047         struct btrfs_path *path = new->path;
2048         struct btrfs_key key;
2049         struct btrfs_root *root;
2050         struct sa_defrag_extent_backref *backref;
2051         struct extent_buffer *leaf;
2052         struct inode *inode = new->inode;
2053         int slot;
2054         int ret;
2055         u64 extent_offset;
2056         u64 num_bytes;
2057 
2058         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2059             inum == btrfs_ino(inode))
2060                 return 0;
2061 
2062         key.objectid = root_id;
2063         key.type = BTRFS_ROOT_ITEM_KEY;
2064         key.offset = (u64)-1;
2065 
2066         fs_info = BTRFS_I(inode)->root->fs_info;
2067         root = btrfs_read_fs_root_no_name(fs_info, &key);
2068         if (IS_ERR(root)) {
2069                 if (PTR_ERR(root) == -ENOENT)
2070                         return 0;
2071                 WARN_ON(1);
2072                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2073                          inum, offset, root_id);
2074                 return PTR_ERR(root);
2075         }
2076 
2077         key.objectid = inum;
2078         key.type = BTRFS_EXTENT_DATA_KEY;
2079         if (offset > (u64)-1 << 32)
2080                 key.offset = 0;
2081         else
2082                 key.offset = offset;
2083 
2084         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2085         if (WARN_ON(ret < 0))
2086                 return ret;
2087         ret = 0;
2088 
2089         while (1) {
2090                 cond_resched();
2091 
2092                 leaf = path->nodes[0];
2093                 slot = path->slots[0];
2094 
2095                 if (slot >= btrfs_header_nritems(leaf)) {
2096                         ret = btrfs_next_leaf(root, path);
2097                         if (ret < 0) {
2098                                 goto out;
2099                         } else if (ret > 0) {
2100                                 ret = 0;
2101                                 goto out;
2102                         }
2103                         continue;
2104                 }
2105 
2106                 path->slots[0]++;
2107 
2108                 btrfs_item_key_to_cpu(leaf, &key, slot);
2109 
2110                 if (key.objectid > inum)
2111                         goto out;
2112 
2113                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2114                         continue;
2115 
2116                 extent = btrfs_item_ptr(leaf, slot,
2117                                         struct btrfs_file_extent_item);
2118 
2119                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2120                         continue;
2121 
2122                 /*
2123                  * 'offset' refers to the exact key.offset,
2124                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2125                  * (key.offset - extent_offset).
2126                  */
2127                 if (key.offset != offset)
2128                         continue;
2129 
2130                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2131                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2132 
2133                 if (extent_offset >= old->extent_offset + old->offset +
2134                     old->len || extent_offset + num_bytes <=
2135                     old->extent_offset + old->offset)
2136                         continue;
2137                 break;
2138         }
2139 
2140         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2141         if (!backref) {
2142                 ret = -ENOMEM;
2143                 goto out;
2144         }
2145 
2146         backref->root_id = root_id;
2147         backref->inum = inum;
2148         backref->file_pos = offset;
2149         backref->num_bytes = num_bytes;
2150         backref->extent_offset = extent_offset;
2151         backref->generation = btrfs_file_extent_generation(leaf, extent);
2152         backref->old = old;
2153         backref_insert(&new->root, backref);
2154         old->count++;
2155 out:
2156         btrfs_release_path(path);
2157         WARN_ON(ret);
2158         return ret;
2159 }
2160 
2161 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2162                                    struct new_sa_defrag_extent *new)
2163 {
2164         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2165         struct old_sa_defrag_extent *old, *tmp;
2166         int ret;
2167 
2168         new->path = path;
2169 
2170         list_for_each_entry_safe(old, tmp, &new->head, list) {
2171                 ret = iterate_inodes_from_logical(old->bytenr +
2172                                                   old->extent_offset, fs_info,
2173                                                   path, record_one_backref,
2174                                                   old);
2175                 if (ret < 0 && ret != -ENOENT)
2176                         return false;
2177 
2178                 /* no backref to be processed for this extent */
2179                 if (!old->count) {
2180                         list_del(&old->list);
2181                         kfree(old);
2182                 }
2183         }
2184 
2185         if (list_empty(&new->head))
2186                 return false;
2187 
2188         return true;
2189 }
2190 
2191 static int relink_is_mergable(struct extent_buffer *leaf,
2192                               struct btrfs_file_extent_item *fi,
2193                               struct new_sa_defrag_extent *new)
2194 {
2195         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2196                 return 0;
2197 
2198         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2199                 return 0;
2200 
2201         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2202                 return 0;
2203 
2204         if (btrfs_file_extent_encryption(leaf, fi) ||
2205             btrfs_file_extent_other_encoding(leaf, fi))
2206                 return 0;
2207 
2208         return 1;
2209 }
2210 
2211 /*
2212  * Note the backref might have changed; in that case we just return 0.
2213  */
2214 static noinline int relink_extent_backref(struct btrfs_path *path,
2215                                  struct sa_defrag_extent_backref *prev,
2216                                  struct sa_defrag_extent_backref *backref)
2217 {
2218         struct btrfs_file_extent_item *extent;
2219         struct btrfs_file_extent_item *item;
2220         struct btrfs_ordered_extent *ordered;
2221         struct btrfs_trans_handle *trans;
2222         struct btrfs_fs_info *fs_info;
2223         struct btrfs_root *root;
2224         struct btrfs_key key;
2225         struct extent_buffer *leaf;
2226         struct old_sa_defrag_extent *old = backref->old;
2227         struct new_sa_defrag_extent *new = old->new;
2228         struct inode *src_inode = new->inode;
2229         struct inode *inode;
2230         struct extent_state *cached = NULL;
2231         int ret = 0;
2232         u64 start;
2233         u64 len;
2234         u64 lock_start;
2235         u64 lock_end;
2236         bool merge = false;
2237         int index;
2238 
2239         if (prev && prev->root_id == backref->root_id &&
2240             prev->inum == backref->inum &&
2241             prev->file_pos + prev->num_bytes == backref->file_pos)
2242                 merge = true;
2243 
2244         /* step 1: get root */
2245         key.objectid = backref->root_id;
2246         key.type = BTRFS_ROOT_ITEM_KEY;
2247         key.offset = (u64)-1;
2248 
2249         fs_info = BTRFS_I(src_inode)->root->fs_info;
2250         index = srcu_read_lock(&fs_info->subvol_srcu);
2251 
2252         root = btrfs_read_fs_root_no_name(fs_info, &key);
2253         if (IS_ERR(root)) {
2254                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2255                 if (PTR_ERR(root) == -ENOENT)
2256                         return 0;
2257                 return PTR_ERR(root);
2258         }
2259 
2260         /* step 2: get inode */
2261         key.objectid = backref->inum;
2262         key.type = BTRFS_INODE_ITEM_KEY;
2263         key.offset = 0;
2264 
2265         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2266         if (IS_ERR(inode)) {
2267                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2268                 return 0;
2269         }
2270 
2271         srcu_read_unlock(&fs_info->subvol_srcu, index);
2272 
2273         /* step 3: relink backref */
2274         lock_start = backref->file_pos;
2275         lock_end = backref->file_pos + backref->num_bytes - 1;
2276         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2277                          0, &cached);
2278 
2279         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2280         if (ordered) {
2281                 btrfs_put_ordered_extent(ordered);
2282                 goto out_unlock;
2283         }
2284 
2285         trans = btrfs_join_transaction(root);
2286         if (IS_ERR(trans)) {
2287                 ret = PTR_ERR(trans);
2288                 goto out_unlock;
2289         }
2290 
2291         key.objectid = backref->inum;
2292         key.type = BTRFS_EXTENT_DATA_KEY;
2293         key.offset = backref->file_pos;
2294 
2295         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2296         if (ret < 0) {
2297                 goto out_free_path;
2298         } else if (ret > 0) {
2299                 ret = 0;
2300                 goto out_free_path;
2301         }
2302 
2303         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2304                                 struct btrfs_file_extent_item);
2305 
2306         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2307             backref->generation)
2308                 goto out_free_path;
2309 
2310         btrfs_release_path(path);
2311 
2312         start = backref->file_pos;
2313         if (backref->extent_offset < old->extent_offset + old->offset)
2314                 start += old->extent_offset + old->offset -
2315                          backref->extent_offset;
2316 
2317         len = min(backref->extent_offset + backref->num_bytes,
2318                   old->extent_offset + old->offset + old->len);
2319         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2320 
2321         ret = btrfs_drop_extents(trans, root, inode, start,
2322                                  start + len, 1);
2323         if (ret)
2324                 goto out_free_path;
2325 again:
2326         key.objectid = btrfs_ino(inode);
2327         key.type = BTRFS_EXTENT_DATA_KEY;
2328         key.offset = start;
2329 
2330         path->leave_spinning = 1;
2331         if (merge) {
2332                 struct btrfs_file_extent_item *fi;
2333                 u64 extent_len;
2334                 struct btrfs_key found_key;
2335 
2336                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2337                 if (ret < 0)
2338                         goto out_free_path;
2339 
2340                 path->slots[0]--;
2341                 leaf = path->nodes[0];
2342                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2343 
2344                 fi = btrfs_item_ptr(leaf, path->slots[0],
2345                                     struct btrfs_file_extent_item);
2346                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2347 
2348                 if (extent_len + found_key.offset == start &&
2349                     relink_is_mergable(leaf, fi, new)) {
2350                         btrfs_set_file_extent_num_bytes(leaf, fi,
2351                                                         extent_len + len);
2352                         btrfs_mark_buffer_dirty(leaf);
2353                         inode_add_bytes(inode, len);
2354 
2355                         ret = 1;
2356                         goto out_free_path;
2357                 } else {
2358                         merge = false;
2359                         btrfs_release_path(path);
2360                         goto again;
2361                 }
2362         }
2363 
2364         ret = btrfs_insert_empty_item(trans, root, path, &key,
2365                                         sizeof(*extent));
2366         if (ret) {
2367                 btrfs_abort_transaction(trans, root, ret);
2368                 goto out_free_path;
2369         }
2370 
2371         leaf = path->nodes[0];
2372         item = btrfs_item_ptr(leaf, path->slots[0],
2373                                 struct btrfs_file_extent_item);
2374         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2375         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2376         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2377         btrfs_set_file_extent_num_bytes(leaf, item, len);
2378         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2379         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2380         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2381         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2382         btrfs_set_file_extent_encryption(leaf, item, 0);
2383         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2384 
2385         btrfs_mark_buffer_dirty(leaf);
2386         inode_add_bytes(inode, len);
2387         btrfs_release_path(path);
2388 
2389         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2390                         new->disk_len, 0,
2391                         backref->root_id, backref->inum,
2392                         new->file_pos, 0);      /* start - extent_offset */
2393         if (ret) {
2394                 btrfs_abort_transaction(trans, root, ret);
2395                 goto out_free_path;
2396         }
2397 
2398         ret = 1;
2399 out_free_path:
2400         btrfs_release_path(path);
2401         path->leave_spinning = 0;
2402         btrfs_end_transaction(trans, root);
2403 out_unlock:
2404         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2405                              &cached, GFP_NOFS);
2406         iput(inode);
2407         return ret;
2408 }
2409 
2410 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2411 {
2412         struct old_sa_defrag_extent *old, *tmp;
2413 
2414         if (!new)
2415                 return;
2416 
2417         list_for_each_entry_safe(old, tmp, &new->head, list) {
2418                 list_del(&old->list);
2419                 kfree(old);
2420         }
2421         kfree(new);
2422 }
2423 
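     /*
      * walk the sorted backref tree built by record_extent_backrefs() and
      * relink each backref to the new defragged extent, merging adjacent
      * entries from the same file where possible
      */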
2424 static void relink_file_extents(struct new_sa_defrag_extent *new)
2425 {
2426         struct btrfs_path *path;
2427         struct sa_defrag_extent_backref *backref;
2428         struct sa_defrag_extent_backref *prev = NULL;
2429         struct inode *inode;
2430         struct btrfs_root *root;
2431         struct rb_node *node;
2432         int ret;
2433 
2434         inode = new->inode;
2435         root = BTRFS_I(inode)->root;
2436 
2437         path = btrfs_alloc_path();
2438         if (!path)
2439                 return;
2440 
2441         if (!record_extent_backrefs(path, new)) {
2442                 btrfs_free_path(path);
2443                 goto out;
2444         }
2445         btrfs_release_path(path);
2446 
2447         while (1) {
2448                 node = rb_first(&new->root);
2449                 if (!node)
2450                         break;
2451                 rb_erase(node, &new->root);
2452 
2453                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2454 
2455                 ret = relink_extent_backref(path, prev, backref);
2456                 WARN_ON(ret < 0);
2457 
2458                 kfree(prev);
2459 
2460                 if (ret == 1)
2461                         prev = backref;
2462                 else
2463                         prev = NULL;
2464                 cond_resched();
2465         }
2466         kfree(prev);
2467 
2468         btrfs_free_path(path);
2469 out:
2470         free_sa_defrag_extent(new);
2471 
2472         atomic_dec(&root->fs_info->defrag_running);
2473         wake_up(&root->fs_info->transaction_wait);
2474 }
2475 
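     /*
      * collect the pre-defrag file extents overlapping this ordered extent
      * so relink_file_extents() can point their other referents at the new
      * location once the write completes
      */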
2476 static struct new_sa_defrag_extent *
2477 record_old_file_extents(struct inode *inode,
2478                         struct btrfs_ordered_extent *ordered)
2479 {
2480         struct btrfs_root *root = BTRFS_I(inode)->root;
2481         struct btrfs_path *path;
2482         struct btrfs_key key;
2483         struct old_sa_defrag_extent *old;
2484         struct new_sa_defrag_extent *new;
2485         int ret;
2486 
2487         new = kmalloc(sizeof(*new), GFP_NOFS);
2488         if (!new)
2489                 return NULL;
2490 
2491         new->inode = inode;
2492         new->file_pos = ordered->file_offset;
2493         new->len = ordered->len;
2494         new->bytenr = ordered->start;
2495         new->disk_len = ordered->disk_len;
2496         new->compress_type = ordered->compress_type;
2497         new->root = RB_ROOT;
2498         INIT_LIST_HEAD(&new->head);
2499 
2500         path = btrfs_alloc_path();
2501         if (!path)
2502                 goto out_kfree;
2503 
2504         key.objectid = btrfs_ino(inode);
2505         key.type = BTRFS_EXTENT_DATA_KEY;
2506         key.offset = new->file_pos;
2507 
2508         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2509         if (ret < 0)
2510                 goto out_free_path;
2511         if (ret > 0 && path->slots[0] > 0)
2512                 path->slots[0]--;
2513 
2514         /* find out all the old extents for the file range */
2515         while (1) {
2516                 struct btrfs_file_extent_item *extent;
2517                 struct extent_buffer *l;
2518                 int slot;
2519                 u64 num_bytes;
2520                 u64 offset;
2521                 u64 end;
2522                 u64 disk_bytenr;
2523                 u64 extent_offset;
2524 
2525                 l = path->nodes[0];
2526                 slot = path->slots[0];
2527 
2528                 if (slot >= btrfs_header_nritems(l)) {
2529                         ret = btrfs_next_leaf(root, path);
2530                         if (ret < 0)
2531                                 goto out_free_path;
2532                         else if (ret > 0)
2533                                 break;
2534                         continue;
2535                 }
2536 
2537                 btrfs_item_key_to_cpu(l, &key, slot);
2538 
2539                 if (key.objectid != btrfs_ino(inode))
2540                         break;
2541                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2542                         break;
2543                 if (key.offset >= new->file_pos + new->len)
2544                         break;
2545 
2546                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2547 
2548                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2549                 if (key.offset + num_bytes < new->file_pos)
2550                         goto next;
2551 
2552                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2553                 if (!disk_bytenr)
2554                         goto next;
2555 
2556                 extent_offset = btrfs_file_extent_offset(l, extent);
2557 
2558                 old = kmalloc(sizeof(*old), GFP_NOFS);
2559                 if (!old)
2560                         goto out_free_path;
2561 
2562                 offset = max(new->file_pos, key.offset);
2563                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2564 
2565                 old->bytenr = disk_bytenr;
2566                 old->extent_offset = extent_offset;
2567                 old->offset = offset - key.offset;
2568                 old->len = end - offset;
2569                 old->new = new;
2570                 old->count = 0;
2571                 list_add_tail(&old->list, &new->head);
2572 next:
2573                 path->slots[0]++;
2574                 cond_resched();
2575         }
2576 
2577         btrfs_free_path(path);
2578         atomic_inc(&root->fs_info->defrag_running);
2579 
2580         return new;
2581 
2582 out_free_path:
2583         btrfs_free_path(path);
2584 out_kfree:
2585         free_sa_defrag_extent(new);
2586         return NULL;
2587 }
2588 
2589 /* as ordered data IO finishes, this gets called so we can finish
2590  * an ordered extent if the range of bytes in the file it covers are
2591  * fully written.
2592  */
2593 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2594 {
2595         struct inode *inode = ordered_extent->inode;
2596         struct btrfs_root *root = BTRFS_I(inode)->root;
2597         struct btrfs_trans_handle *trans = NULL;
2598         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2599         struct extent_state *cached_state = NULL;
2600         struct new_sa_defrag_extent *new = NULL;
2601         int compress_type = 0;
2602         int ret = 0;
2603         u64 logical_len = ordered_extent->len;
2604         bool nolock;
2605         bool truncated = false;
2606 
2607         nolock = btrfs_is_free_space_inode(inode);
2608 
2609         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2610                 ret = -EIO;
2611                 goto out;
2612         }
2613 
2614         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2615                 truncated = true;
2616                 logical_len = ordered_extent->truncated_len;
2617                 /* Truncated the entire extent, don't bother adding */
2618                 if (!logical_len)
2619                         goto out;
2620         }
2621 
2622         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2623                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2624                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2625                 if (nolock)
2626                         trans = btrfs_join_transaction_nolock(root);
2627                 else
2628                         trans = btrfs_join_transaction(root);
2629                 if (IS_ERR(trans)) {
2630                         ret = PTR_ERR(trans);
2631                         trans = NULL;
2632                         goto out;
2633                 }
2634                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2635                 ret = btrfs_update_inode_fallback(trans, root, inode);
2636                 if (ret) /* -ENOMEM or corruption */
2637                         btrfs_abort_transaction(trans, root, ret);
2638                 goto out;
2639         }
2640 
2641         lock_extent_bits(io_tree, ordered_extent->file_offset,
2642                          ordered_extent->file_offset + ordered_extent->len - 1,
2643                          0, &cached_state);
2644 
2645         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2646                         ordered_extent->file_offset + ordered_extent->len - 1,
2647                         EXTENT_DEFRAG, 1, cached_state);
2648         if (ret) {
2649                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2650                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2651                         /* inode is shared; sa-defrag deliberately disabled */
2652                         new = record_old_file_extents(inode, ordered_extent);
2653 
2654                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2655                         ordered_extent->file_offset + ordered_extent->len - 1,
2656                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2657         }
2658 
2659         if (nolock)
2660                 trans = btrfs_join_transaction_nolock(root);
2661         else
2662                 trans = btrfs_join_transaction(root);
2663         if (IS_ERR(trans)) {
2664                 ret = PTR_ERR(trans);
2665                 trans = NULL;
2666                 goto out_unlock;
2667         }
2668         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2669 
2670         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2671                 compress_type = ordered_extent->compress_type;
2672         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2673                 BUG_ON(compress_type);
2674                 ret = btrfs_mark_extent_written(trans, inode,
2675                                                 ordered_extent->file_offset,
2676                                                 ordered_extent->file_offset +
2677                                                 logical_len);
2678         } else {
2679                 BUG_ON(root == root->fs_info->tree_root);
2680                 ret = insert_reserved_file_extent(trans, inode,
2681                                                 ordered_extent->file_offset,
2682                                                 ordered_extent->start,
2683                                                 ordered_extent->disk_len,
2684                                                 logical_len, logical_len,
2685                                                 compress_type, 0, 0,
2686                                                 BTRFS_FILE_EXTENT_REG);
2687         }
2688         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2689                            ordered_extent->file_offset, ordered_extent->len,
2690                            trans->transid);
2691         if (ret < 0) {
2692                 btrfs_abort_transaction(trans, root, ret);
2693                 goto out_unlock;
2694         }
2695 
2696         add_pending_csums(trans, inode, ordered_extent->file_offset,
2697                           &ordered_extent->list);
2698 
2699         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2700         ret = btrfs_update_inode_fallback(trans, root, inode);
2701         if (ret) { /* -ENOMEM or corruption */
2702                 btrfs_abort_transaction(trans, root, ret);
2703                 goto out_unlock;
2704         }
2705         ret = 0;
2706 out_unlock:
2707         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2708                              ordered_extent->file_offset +
2709                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2710 out:
2711         if (root != root->fs_info->tree_root)
2712                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2713         if (trans)
2714                 btrfs_end_transaction(trans, root);
2715 
2716         if (ret || truncated) {
2717                 u64 start, end;
2718 
2719                 if (truncated)
2720                         start = ordered_extent->file_offset + logical_len;
2721                 else
2722                         start = ordered_extent->file_offset;
2723                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2724                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2725 
2726                 /* Drop the cache for the part of the extent we didn't write. */
2727                 btrfs_drop_extent_cache(inode, start, end, 0);
2728 
2729                 /*
2730                  * If the ordered extent had an IOERR or something else went
2731                  * wrong we need to return the space for this ordered extent
2732                  * back to the allocator.  We only free the extent in the
2733                  * truncated case if we didn't write out the extent at all.
2734                  */
2735                 if ((ret || !logical_len) &&
2736                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2737                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2738                         btrfs_free_reserved_extent(root, ordered_extent->start,
2739                                                    ordered_extent->disk_len);
2740         }
2741 
2742 
2743         /*
2744          * This needs to be done to make sure anybody waiting knows we are done
2745          * updating everything for this ordered extent.
2746          */
2747         btrfs_remove_ordered_extent(inode, ordered_extent);
2748 
2749         /* for snapshot-aware defrag */
2750         if (new) {
2751                 if (ret) {
2752                         free_sa_defrag_extent(new);
2753                         atomic_dec(&root->fs_info->defrag_running);
2754                 } else {
2755                         relink_file_extents(new);
2756                 }
2757         }
2758 
2759         /* once for us */
2760         btrfs_put_ordered_extent(ordered_extent);
2761         /* once for the tree */
2762         btrfs_put_ordered_extent(ordered_extent);
2763 
2764         return ret;
2765 }
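
/*
 * Illustrative summary of the ordered-extent completion above, per the
 * flag handling in this function:
 *
 *	ORDERED_IOERR     -> fail with -EIO; the reserved extent is given
 *	                     back to the allocator on the way out
 *	ORDERED_TRUNCATED -> only finish the surviving logical_len prefix
 *	ORDERED_NOCOW     -> data was written over existing extents; just
 *	                     update i_size and the inode item
 *	ORDERED_PREALLOC  -> flip the preallocated range to written via
 *	                     btrfs_mark_extent_written()
 *	otherwise         -> insert a fresh BTRFS_FILE_EXTENT_REG item via
 *	                     insert_reserved_file_extent(), carrying
 *	                     compress_type when ORDERED_COMPRESSED is set
 */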
2766 
2767 static void finish_ordered_fn(struct btrfs_work *work)
2768 {
2769         struct btrfs_ordered_extent *ordered_extent;
2770         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2771         btrfs_finish_ordered_io(ordered_extent);
2772 }
2773 
2774 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2775                                 struct extent_state *state, int uptodate)
2776 {
2777         struct inode *inode = page->mapping->host;
2778         struct btrfs_root *root = BTRFS_I(inode)->root;
2779         struct btrfs_ordered_extent *ordered_extent = NULL;
2780         struct btrfs_workers *workers;
2781 
2782         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2783 
2784         ClearPagePrivate2(page);
2785         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2786                                             end - start + 1, uptodate))
2787                 return 0;
2788 
2789         ordered_extent->work.func = finish_ordered_fn;
2790         ordered_extent->work.flags = 0;
2791 
2792         if (btrfs_is_free_space_inode(inode))
2793                 workers = &root->fs_info->endio_freespace_worker;
2794         else
2795                 workers = &root->fs_info->endio_write_workers;
2796         btrfs_queue_worker(workers, &ordered_extent->work);
2797 
2798         return 0;
2799 }
2800 
2801 /*
2802  * when reads are done, we need to check csums to verify the data is correct.
2803  * If there's a match, we allow the bio to finish.  If not, the code in
2804  * extent_io.c will try to find good copies for us.
2805  */
2806 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2807                                       u64 phy_offset, struct page *page,
2808                                       u64 start, u64 end, int mirror)
2809 {
2810         size_t offset = start - page_offset(page);
2811         struct inode *inode = page->mapping->host;
2812         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2813         char *kaddr;
2814         struct btrfs_root *root = BTRFS_I(inode)->root;
2815         u32 csum_expected;
2816         u32 csum = ~(u32)0;
2817         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
2818                                       DEFAULT_RATELIMIT_BURST);
2819 
2820         if (PageChecked(page)) {
2821                 ClearPageChecked(page);
2822                 goto good;
2823         }
2824 
2825         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2826                 goto good;
2827 
2828         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2829             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2830                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2831                                   GFP_NOFS);
2832                 return 0;
2833         }
2834 
2835         phy_offset >>= inode->i_sb->s_blocksize_bits;
2836         csum_expected = *(((u32 *)io_bio->csum) + phy_offset);
2837 
2838         kaddr = kmap_atomic(page);
2839         csum = btrfs_csum_data(kaddr + offset, csum,  end - start + 1);
2840         btrfs_csum_final(csum, (char *)&csum);
2841         if (csum != csum_expected)
2842                 goto zeroit;
2843 
2844         kunmap_atomic(kaddr);
2845 good:
2846         return 0;
2847 
2848 zeroit:
2849         if (__ratelimit(&_rs))
2850                 btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
2851                         btrfs_ino(page->mapping->host), start, csum, csum_expected);
2852         memset(kaddr + offset, 1, end - start + 1);
2853         flush_dcache_page(page);
2854         kunmap_atomic(kaddr);
2855         if (csum_expected == 0)
2856                 return 0;
2857         return -EIO;
2858 }
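
/*
 * A minimal sketch (not compiled) of the checksum recomputation done in
 * the hook above: btrfs_csum_data() wraps crc32c with the usual ~0 seed
 * and btrfs_csum_final() stores the bit-inverted result.  The helper
 * name below is hypothetical.
 */
#if 0
static u32 example_block_csum(char *data, size_t len)
{
	u32 csum = ~(u32)0;			/* crc32c seed */

	csum = btrfs_csum_data(data, csum, len);
	btrfs_csum_final(csum, (char *)&csum);	/* final bit inversion */
	return csum;
}
#endif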
2859 
2860 struct delayed_iput {
2861         struct list_head list;
2862         struct inode *inode;
2863 };
2864 
2865 /* JDM: If this is fs-wide, why can't we add a pointer to
2866  * btrfs_inode instead and avoid the allocation? */
2867 void btrfs_add_delayed_iput(struct inode *inode)
2868 {
2869         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2870         struct delayed_iput *delayed;
2871 
2872         if (atomic_add_unless(&inode->i_count, -1, 1))
2873                 return;
2874 
2875         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2876         delayed->inode = inode;
2877 
2878         spin_lock(&fs_info->delayed_iput_lock);
2879         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2880         spin_unlock(&fs_info->delayed_iput_lock);
2881 }
2882 
2883 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2884 {
2885         LIST_HEAD(list);
2886         struct btrfs_fs_info *fs_info = root->fs_info;
2887         struct delayed_iput *delayed;
2888         int empty;
2889 
2890         spin_lock(&fs_info->delayed_iput_lock);
2891         empty = list_empty(&fs_info->delayed_iputs);
2892         spin_unlock(&fs_info->delayed_iput_lock);
2893         if (empty)
2894                 return;
2895 
2896         spin_lock(&fs_info->delayed_iput_lock);
2897         list_splice_init(&fs_info->delayed_iputs, &list);
2898         spin_unlock(&fs_info->delayed_iput_lock);
2899 
2900         while (!list_empty(&list)) {
2901                 delayed = list_entry(list.next, struct delayed_iput, list);
2902                 list_del(&delayed->list);
2903                 iput(delayed->inode);
2904                 kfree(delayed);
2905         }
2906 }
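
/*
 * Usage sketch for the pair above (illustrative): a context where the
 * final iput() could recurse into the filesystem - evict can start a
 * transaction - hands the reference off instead, and the list is drained
 * later by a call to btrfs_run_delayed_iputs(root).
 */
#if 0
	btrfs_add_delayed_iput(inode);	/* deferred, possibly final, iput */
#endif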
2907 
2908 /*
2909  * This is called at transaction commit time. If there are no orphan
2910  * files in the subvolume, it removes the orphan item and frees the
2911  * block_rsv structure.
2912  */
2913 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2914                               struct btrfs_root *root)
2915 {
2916         struct btrfs_block_rsv *block_rsv;
2917         int ret;
2918 
2919         if (atomic_read(&root->orphan_inodes) ||
2920             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2921                 return;
2922 
2923         spin_lock(&root->orphan_lock);
2924         if (atomic_read(&root->orphan_inodes)) {
2925                 spin_unlock(&root->orphan_lock);
2926                 return;
2927         }
2928 
2929         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2930                 spin_unlock(&root->orphan_lock);
2931                 return;
2932         }
2933 
2934         block_rsv = root->orphan_block_rsv;
2935         root->orphan_block_rsv = NULL;
2936         spin_unlock(&root->orphan_lock);
2937 
2938         if (root->orphan_item_inserted &&
2939             btrfs_root_refs(&root->root_item) > 0) {
2940                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2941                                             root->root_key.objectid);
2942                 if (ret)
2943                         btrfs_abort_transaction(trans, root, ret);
2944                 else
2945                         root->orphan_item_inserted = 0;
2946         }
2947 
2948         if (block_rsv) {
2949                 WARN_ON(block_rsv->size > 0);
2950                 btrfs_free_block_rsv(root, block_rsv);
2951         }
2952 }
2953 
2954 /*
2955  * This creates an orphan entry for the given inode in case something goes
2956  * wrong in the middle of an unlink/truncate.
2957  *
2958  * NOTE: the caller of this function should reserve 5 units of
2959  *       metadata before calling it.
2960  */
2961 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2962 {
2963         struct btrfs_root *root = BTRFS_I(inode)->root;
2964         struct btrfs_block_rsv *block_rsv = NULL;
2965         int reserve = 0;
2966         int insert = 0;
2967         int ret;
2968 
2969         if (!root->orphan_block_rsv) {
2970                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2971                 if (!block_rsv)
2972                         return -ENOMEM;
2973         }
2974 
2975         spin_lock(&root->orphan_lock);
2976         if (!root->orphan_block_rsv) {
2977                 root->orphan_block_rsv = block_rsv;
2978         } else if (block_rsv) {
2979                 btrfs_free_block_rsv(root, block_rsv);
2980                 block_rsv = NULL;
2981         }
2982 
2983         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2984                               &BTRFS_I(inode)->runtime_flags)) {
2985 #if 0
2986                 /*
2987                  * For proper ENOSPC handling, we should do orphan
2988                  * cleanup when mounting. But this introduces backward
2989                  * compatibility issue.
2990                  */
2991                 if (!xchg(&root->orphan_item_inserted, 1))
2992                         insert = 2;
2993                 else
2994                         insert = 1;
2995 #endif
2996                 insert = 1;
2997                 atomic_inc(&root->orphan_inodes);
2998         }
2999 
3000         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3001                               &BTRFS_I(inode)->runtime_flags))
3002                 reserve = 1;
3003         spin_unlock(&root->orphan_lock);
3004 
3005         /* grab metadata reservation from transaction handle */
3006         if (reserve) {
3007                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3008                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3009         }
3010 
3011         /* insert an orphan item to track this unlinked/truncated file */
3012         if (insert >= 1) {
3013                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3014                 if (ret) {
3015                         atomic_dec(&root->orphan_inodes);
3016                         if (reserve) {
3017                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3018                                           &BTRFS_I(inode)->runtime_flags);
3019                                 btrfs_orphan_release_metadata(inode);
3020                         }
3021                         if (ret != -EEXIST) {
3022                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3023                                           &BTRFS_I(inode)->runtime_flags);
3024                                 btrfs_abort_transaction(trans, root, ret);
3025                                 return ret;
3026                         }
3027                 }
3028                 ret = 0;
3029         }
3030 
3031         /* insert an orphan item to track that the subvolume contains orphans */
3032         if (insert >= 2) {
3033                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3034                                                root->root_key.objectid);
3035                 if (ret && ret != -EEXIST) {
3036                         btrfs_abort_transaction(trans, root, ret);
3037                         return ret;
3038                 }
3039         }
3040         return 0;
3041 }
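
/*
 * Usage sketch for the NOTE above (illustrative): the unlink path later
 * in this file reserves exactly those 5 units before it may call
 * btrfs_orphan_add().
 */
#if 0
	trans = btrfs_start_transaction(root, 5);	/* orphan item, dir item,
							   dir index, inode ref,
							   inode */
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_orphan_add(trans, inode);
#endif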
3042 
3043 /*
3044  * We have done the truncate/delete so we can go ahead and remove the orphan
3045  * item for this particular inode.
3046  */
3047 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3048                             struct inode *inode)
3049 {
3050         struct btrfs_root *root = BTRFS_I(inode)->root;
3051         int delete_item = 0;
3052         int release_rsv = 0;
3053         int ret = 0;
3054 
3055         spin_lock(&root->orphan_lock);
3056         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3057                                &BTRFS_I(inode)->runtime_flags))
3058                 delete_item = 1;
3059 
3060         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3061                                &BTRFS_I(inode)->runtime_flags))
3062                 release_rsv = 1;
3063         spin_unlock(&root->orphan_lock);
3064 
3065         if (delete_item) {
3066                 atomic_dec(&root->orphan_inodes);
3067                 if (trans)
3068                         ret = btrfs_del_orphan_item(trans, root,
3069                                                     btrfs_ino(inode));
3070         }
3071 
3072         if (release_rsv)
3073                 btrfs_orphan_release_metadata(inode);
3074 
3075         return ret;
3076 }
3077 
3078 /*
3079  * this cleans up any orphans that may be left on the list from the last use
3080  * of this root.
3081  */
3082 int btrfs_orphan_cleanup(struct btrfs_root *root)
3083 {
3084         struct btrfs_path *path;
3085         struct extent_buffer *leaf;
3086         struct btrfs_key key, found_key;
3087         struct btrfs_trans_handle *trans;
3088         struct inode *inode;
3089         u64 last_objectid = 0;
3090         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3091 
3092         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3093                 return 0;
3094 
3095         path = btrfs_alloc_path();
3096         if (!path) {
3097                 ret = -ENOMEM;
3098                 goto out;
3099         }
3100         path->reada = -1;
3101 
3102         key.objectid = BTRFS_ORPHAN_OBJECTID;
3103         btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3104         key.offset = (u64)-1;
3105 
3106         while (1) {
3107                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3108                 if (ret < 0)
3109                         goto out;
3110 
3111                 /*
3112                  * ret == 0 means we found exactly what we were searching
3113                  * for, which is weird but possible; only adjust the path if
3114                  * we didn't find the key, then see if what's there matches
3115                  */
3116                 if (ret > 0) {
3117                         ret = 0;
3118                         if (path->slots[0] == 0)
3119                                 break;
3120                         path->slots[0]--;
3121                 }
3122 
3123                 /* pull out the item */
3124                 leaf = path->nodes[0];
3125                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3126 
3127                 /* make sure the item matches what we want */
3128                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3129                         break;
3130                 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3131                         break;
3132 
3133                 /* release the path since we're done with it */
3134                 btrfs_release_path(path);
3135 
3136                 /*
3137                  * this is where we basically do btrfs_lookup, minus the
3138                  * root-crossing part.  we store the inode number in the
3139                  * offset of the orphan item.
3140                  */
3141 
3142                 if (found_key.offset == last_objectid) {
3143                         btrfs_err(root->fs_info,
3144                                 "Error removing orphan entry, stopping orphan cleanup");
3145                         ret = -EINVAL;
3146                         goto out;
3147                 }
3148 
3149                 last_objectid = found_key.offset;
3150 
3151                 found_key.objectid = found_key.offset;
3152                 found_key.type = BTRFS_INODE_ITEM_KEY;
3153                 found_key.offset = 0;
3154                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3155                 ret = PTR_ERR_OR_ZERO(inode);
3156                 if (ret && ret != -ESTALE)
3157                         goto out;
3158 
3159                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3160                         struct btrfs_root *dead_root;
3161                         struct btrfs_fs_info *fs_info = root->fs_info;
3162                         int is_dead_root = 0;
3163 
3164                         /*
3165                          * this is an orphan in the tree root. Currently these
3166                          * could come from 2 sources:
3167                          *  a) a snapshot deletion in progress
3168                          *  b) a free space cache inode
3169                          * We need to distinguish those two, as the snapshot
3170                          * orphan must not get deleted.
3171                          * find_dead_roots already ran before us, so if this
3172                          * is a snapshot deletion, we should find the root
3173                          * in the dead_roots list
3174                          */
3175                         spin_lock(&fs_info->trans_lock);
3176                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3177                                             root_list) {
3178                                 if (dead_root->root_key.objectid ==
3179                                     found_key.objectid) {
3180                                         is_dead_root = 1;
3181                                         break;
3182                                 }
3183                         }
3184                         spin_unlock(&fs_info->trans_lock);
3185                         if (is_dead_root) {
3186                                 /* prevent this orphan from being found again */
3187                                 key.offset = found_key.objectid - 1;
3188                                 continue;
3189                         }
3190                 }
3191                 /*
3192                  * Inode is already gone but the orphan item is still there,
3193                  * kill the orphan item.
3194                  */
3195                 if (ret == -ESTALE) {
3196                         trans = btrfs_start_transaction(root, 1);
3197                         if (IS_ERR(trans)) {
3198                                 ret = PTR_ERR(trans);
3199                                 goto out;
3200                         }
3201                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3202                                 found_key.objectid);
3203                         ret = btrfs_del_orphan_item(trans, root,
3204                                                     found_key.objectid);
3205                         btrfs_end_transaction(trans, root);
3206                         if (ret)
3207                                 goto out;
3208                         continue;
3209                 }
3210 
3211                 /*
3212                  * add this inode to the orphan list so btrfs_orphan_del does
3213                  * the proper thing when we hit it
3214                  */
3215                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3216                         &BTRFS_I(inode)->runtime_flags);
3217                 atomic_inc(&root->orphan_inodes);
3218 
3219                 /* if we have links, this was a truncate, let's do that */
3220                 if (inode->i_nlink) {
3221                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3222                                 iput(inode);
3223                                 continue;
3224                         }
3225                         nr_truncate++;
3226 
3227                         /* 1 for the orphan item deletion. */
3228                         trans = btrfs_start_transaction(root, 1);
3229                         if (IS_ERR(trans)) {
3230                                 iput(inode);
3231                                 ret = PTR_ERR(trans);
3232                                 goto out;
3233                         }
3234                         ret = btrfs_orphan_add(trans, inode);
3235                         btrfs_end_transaction(trans, root);
3236                         if (ret) {
3237                                 iput(inode);
3238                                 goto out;
3239                         }
3240 
3241                         ret = btrfs_truncate(inode);
3242                         if (ret)
3243                                 btrfs_orphan_del(NULL, inode);
3244                 } else {
3245                         nr_unlink++;
3246                 }
3247 
3248                 /* this will do delete_inode and everything for us */
3249                 iput(inode);
3250                 if (ret)
3251                         goto out;
3252         }
3253         /* release the path since we're done with it */
3254         btrfs_release_path(path);
3255 
3256         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3257 
3258         if (root->orphan_block_rsv)
3259                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3260                                         (u64)-1);
3261 
3262         if (root->orphan_block_rsv || root->orphan_item_inserted) {
3263                 trans = btrfs_join_transaction(root);
3264                 if (!IS_ERR(trans))
3265                         btrfs_end_transaction(trans, root);
3266         }
3267 
3268         if (nr_unlink)
3269                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3270         if (nr_truncate)
3271                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3272 
3273 out:
3274         if (ret)
3275                 btrfs_crit(root->fs_info,
3276                         "could not do orphan cleanup %d", ret);
3277         btrfs_free_path(path);
3278         return ret;
3279 }
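
/*
 * Illustrative key shape for the cleanup loop above: every orphan item
 * lives under the fixed BTRFS_ORPHAN_OBJECTID and stores the orphaned
 * inode's number in the key offset, which is why the search starts at
 * offset (u64)-1 and walks backwards.  'inode_number' is hypothetical.
 */
#if 0
	struct btrfs_key orphan_key = {
		.objectid = BTRFS_ORPHAN_OBJECTID,
		.type	  = BTRFS_ORPHAN_ITEM_KEY,
		.offset	  = inode_number,
	};
#endif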
3280 
3281 /*
3282  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3283  * don't find any xattrs, we know there can't be any acls.
3284  *
3285  * slot is the slot the inode is in, objectid is the objectid of the inode
3286  */
3287 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3288                                           int slot, u64 objectid,
3289                                           int *first_xattr_slot)
3290 {
3291         u32 nritems = btrfs_header_nritems(leaf);
3292         struct btrfs_key found_key;
3293         static u64 xattr_access = 0;
3294         static u64 xattr_default = 0;
3295         int scanned = 0;
3296 
3297         if (!xattr_access) {
3298                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3299                                         strlen(POSIX_ACL_XATTR_ACCESS));
3300                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3301                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3302         }
3303 
3304         slot++;
3305         *first_xattr_slot = -1;
3306         while (slot < nritems) {
3307                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3308 
3309                 /* we found a different objectid, there must not be acls */
3310                 if (found_key.objectid != objectid)
3311                         return 0;
3312 
3313                 /* we found an xattr, assume we've got an acl */
3314                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3315                         if (*first_xattr_slot == -1)
3316                                 *first_xattr_slot = slot;
3317                         if (found_key.offset == xattr_access ||
3318                             found_key.offset == xattr_default)
3319                                 return 1;
3320                 }
3321 
3322                 /*
3323                  * we found a key greater than an xattr key, there can't
3324                  * be any acls later on
3325                  */
3326                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3327                         return 0;
3328 
3329                 slot++;
3330                 scanned++;
3331 
3332                 /*
3333                  * it goes inode, inode backrefs, xattrs, extents,
3334                  * so if there are a ton of hard links to an inode there can
3335                  * be a lot of backrefs.  Don't waste time searching too hard,
3336                  * this is just an optimization
3337                  */
3338                 if (scanned >= 8)
3339                         break;
3340         }
3341         /* we hit the end of the leaf before we found an xattr or
3342          * something larger than an xattr.  We have to assume the inode
3343          * has acls
3344          */
3345         if (*first_xattr_slot == -1)
3346                 *first_xattr_slot = slot;
3347         return 1;
3348 }
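
/*
 * Illustrative item ordering the scan above relies on: items for one
 * inode sort by (objectid, type, offset) within a leaf, so any xattrs
 * sit after the inode item and its backrefs but before the extents:
 *
 *	(ino, BTRFS_INODE_ITEM_KEY,  0)
 *	(ino, BTRFS_INODE_REF_KEY,   parent_ino)   [one per hard link]
 *	(ino, BTRFS_XATTR_ITEM_KEY,  name_hash)    [first_xattr_slot]
 *	(ino, BTRFS_EXTENT_DATA_KEY, file_offset)
 *
 * A long run of backrefs can push the xattrs past the 8-slot scan
 * budget, in which case we conservatively report "may have acls".
 */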
3349 
3350 /*
3351  * read an inode from the btree into the in-memory inode
3352  */
3353 static void btrfs_read_locked_inode(struct inode *inode)
3354 {
3355         struct btrfs_path *path;
3356         struct extent_buffer *leaf;
3357         struct btrfs_inode_item *inode_item;
3358         struct btrfs_timespec *tspec;
3359         struct btrfs_root *root = BTRFS_I(inode)->root;
3360         struct btrfs_key location;
3361         unsigned long ptr;
3362         int maybe_acls;
3363         u32 rdev;
3364         int ret;
3365         bool filled = false;
3366         int first_xattr_slot;
3367 
3368         ret = btrfs_fill_inode(inode, &rdev);
3369         if (!ret)
3370                 filled = true;
3371 
3372         path = btrfs_alloc_path();
3373         if (!path)
3374                 goto make_bad;
3375 
3376         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3377 
3378         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3379         if (ret)
3380                 goto make_bad;
3381 
3382         leaf = path->nodes[0];
3383 
3384         if (filled)
3385                 goto cache_index;
3386 
3387         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3388                                     struct btrfs_inode_item);
3389         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3390         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3391         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3392         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3393         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3394 
3395         tspec = btrfs_inode_atime(inode_item);
3396         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3397         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3398 
3399         tspec = btrfs_inode_mtime(inode_item);
3400         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3401         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3402 
3403         tspec = btrfs_inode_ctime(inode_item);
3404         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3405         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3406 
3407         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3408         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3409         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3410 
3411         /*
3412          * If we were modified in the current generation and evicted from memory
3413          * and then re-read we need to do a full sync since we don't have any
3414          * idea about which extents were modified before we were evicted from
3415          * cache.
3416          */
3417         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3418                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3419                         &BTRFS_I(inode)->runtime_flags);
3420 
3421         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3422         inode->i_generation = BTRFS_I(inode)->generation;
3423         inode->i_rdev = 0;
3424         rdev = btrfs_inode_rdev(leaf, inode_item);
3425 
3426         BTRFS_I(inode)->index_cnt = (u64)-1;
3427         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3428 
3429 cache_index:
3430         path->slots[0]++;
3431         if (inode->i_nlink != 1 ||
3432             path->slots[0] >= btrfs_header_nritems(leaf))
3433                 goto cache_acl;
3434 
3435         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3436         if (location.objectid != btrfs_ino(inode))
3437                 goto cache_acl;
3438 
3439         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3440         if (location.type == BTRFS_INODE_REF_KEY) {
3441                 struct btrfs_inode_ref *ref;
3442 
3443                 ref = (struct btrfs_inode_ref *)ptr;
3444                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3445         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3446                 struct btrfs_inode_extref *extref;
3447 
3448                 extref = (struct btrfs_inode_extref *)ptr;
3449                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3450                                                                      extref);
3451         }
3452 cache_acl:
3453         /*
3454          * try to precache a NULL acl entry for files that don't have
3455          * any xattrs or acls
3456          */
3457         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3458                                            btrfs_ino(inode), &first_xattr_slot);
3459         if (first_xattr_slot != -1) {
3460                 path->slots[0] = first_xattr_slot;
3461                 ret = btrfs_load_inode_props(inode, path);
3462                 if (ret)
3463                         btrfs_err(root->fs_info,
3464                                   "error loading props for ino %llu (root %llu): %d",
3465                                   btrfs_ino(inode),
3466                                   root->root_key.objectid, ret);
3467         }
3468         btrfs_free_path(path);
3469 
3470         if (!maybe_acls)
3471                 cache_no_acl(inode);
3472 
3473         switch (inode->i_mode & S_IFMT) {
3474         case S_IFREG:
3475                 inode->i_mapping->a_ops = &btrfs_aops;
3476                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3477                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3478                 inode->i_fop = &btrfs_file_operations;
3479                 inode->i_op = &btrfs_file_inode_operations;
3480                 break;
3481         case S_IFDIR:
3482                 inode->i_fop = &btrfs_dir_file_operations;
3483                 if (root == root->fs_info->tree_root)
3484                         inode->i_op = &btrfs_dir_ro_inode_operations;
3485                 else
3486                         inode->i_op = &btrfs_dir_inode_operations;
3487                 break;
3488         case S_IFLNK:
3489                 inode->i_op = &btrfs_symlink_inode_operations;
3490                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3491                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3492                 break;
3493         default:
3494                 inode->i_op = &btrfs_special_inode_operations;
3495                 init_special_inode(inode, inode->i_mode, rdev);
3496                 break;
3497         }
3498 
3499         btrfs_update_iflags(inode);
3500         return;
3501 
3502 make_bad:
3503         btrfs_free_path(path);
3504         make_bad_inode(inode);
3505 }
3506 
3507 /*
3508  * given a leaf and an inode, copy the inode fields into the leaf
3509  */
3510 static void fill_inode_item(struct btrfs_trans_handle *trans,
3511                             struct extent_buffer *leaf,
3512                             struct btrfs_inode_item *item,
3513                             struct inode *inode)
3514 {
3515         struct btrfs_map_token token;
3516 
3517         btrfs_init_map_token(&token);
3518 
3519         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3520         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3521         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3522                                    &token);
3523         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3524         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3525 
3526         btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3527                                      inode->i_atime.tv_sec, &token);
3528         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3529                                       inode->i_atime.tv_nsec, &token);
3530 
3531         btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3532                                      inode->i_mtime.tv_sec, &token);
3533         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3534                                       inode->i_mtime.tv_nsec, &token);
3535 
3536         btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3537                                      inode->i_ctime.tv_sec, &token);
3538         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3539                                       inode->i_ctime.tv_nsec, &token);
3540 
3541         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3542                                      &token);
3543         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3544                                          &token);
3545         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3546         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3547         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3548         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3549         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3550 }
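
/*
 * A note on the _token setters above (illustrative): each plain
 * btrfs_set_inode_*() accessor has to locate and map the extent buffer
 * page it writes to; the map_token variants cache that mapping in
 * 'token' across the run of consecutive stores, so filling a whole
 * inode item touches the page-mapping machinery once rather than once
 * per field.
 */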
3551 
3552 /*
3553  * copy everything in the in-memory inode into the btree.
3554  */
3555 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3556                                 struct btrfs_root *root, struct inode *inode)
3557 {
3558         struct btrfs_inode_item *inode_item;
3559         struct btrfs_path *path;
3560         struct extent_buffer *leaf;
3561         int ret;
3562 
3563         path = btrfs_alloc_path();
3564         if (!path)
3565                 return -ENOMEM;
3566 
3567         path->leave_spinning = 1;
3568         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3569                                  1);
3570         if (ret) {
3571                 if (ret > 0)
3572                         ret = -ENOENT;
3573                 goto failed;
3574         }
3575 
3576         leaf = path->nodes[0];
3577         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3578                                     struct btrfs_inode_item);
3579 
3580         fill_inode_item(trans, leaf, inode_item, inode);
3581         btrfs_mark_buffer_dirty(leaf);
3582         btrfs_set_inode_last_trans(trans, inode);
3583         ret = 0;
3584 failed:
3585         btrfs_free_path(path);
3586         return ret;
3587 }
3588 
3589 /*
3590  * copy everything in the in-memory inode into the btree.
3591  */
3592 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3593                                 struct btrfs_root *root, struct inode *inode)
3594 {
3595         int ret;
3596 
3597         /*
3598          * If the inode is a free space inode, we can deadlock during commit
3599          * if we put it into the delayed-inode code.
3600          *
3601          * The data relocation inode should also be updated directly,
3602          * without delay.
3603          */
3604         if (!btrfs_is_free_space_inode(inode)
3605             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3606             && !root->fs_info->log_root_recovering) {
3607                 btrfs_update_root_times(trans, root);
3608 
3609                 ret = btrfs_delayed_update_inode(trans, root, inode);
3610                 if (!ret)
3611                         btrfs_set_inode_last_trans(trans, inode);
3612                 return ret;
3613         }
3614 
3615         return btrfs_update_inode_item(trans, root, inode);
3616 }
3617 
3618 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3619                                          struct btrfs_root *root,
3620                                          struct inode *inode)
3621 {
3622         int ret;
3623 
3624         ret = btrfs_update_inode(trans, root, inode);
3625         if (ret == -ENOSPC)
3626                 return btrfs_update_inode_item(trans, root, inode);
3627         return ret;
3628 }
3629 
3630 /*
3631  * unlink helper that gets used here in inode.c and in the tree logging
3632  * recovery code.  It removes a link in a directory with a given name, and
3633  * also drops the back refs in the inode to the directory.
3634  */
3635 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3636                                 struct btrfs_root *root,
3637                                 struct inode *dir, struct inode *inode,
3638                                 const char *name, int name_len)
3639 {
3640         struct btrfs_path *path;
3641         int ret = 0;
3642         struct extent_buffer *leaf;
3643         struct btrfs_dir_item *di;
3644         struct btrfs_key key;
3645         u64 index;
3646         u64 ino = btrfs_ino(inode);
3647         u64 dir_ino = btrfs_ino(dir);
3648 
3649         path = btrfs_alloc_path();
3650         if (!path) {
3651                 ret = -ENOMEM;
3652                 goto out;
3653         }
3654 
3655         path->leave_spinning = 1;
3656         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3657                                     name, name_len, -1);
3658         if (IS_ERR(di)) {
3659                 ret = PTR_ERR(di);
3660                 goto err;
3661         }
3662         if (!di) {
3663                 ret = -ENOENT;
3664                 goto err;
3665         }
3666         leaf = path->nodes[0];
3667         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3668         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3669         if (ret)
3670                 goto err;
3671         btrfs_release_path(path);
3672 
3673         /*
3674          * If we don't have the dir index, we have to get it by looking
3675          * up the inode ref; and since that lookup hands us the inode
3676          * ref anyway, we remove the ref directly - a delayed deletion
3677          * would buy us nothing.
3678          *
3679          * But if we do have the dir index, we need not search for the
3680          * inode ref.  The inode ref sits close to the inode item, so it
3681          * is better to delay its deletion until we update the inode item.
3682          */
3683         if (BTRFS_I(inode)->dir_index) {
3684                 ret = btrfs_delayed_delete_inode_ref(inode);
3685                 if (!ret) {
3686                         index = BTRFS_I(inode)->dir_index;
3687                         goto skip_backref;
3688                 }
3689         }
3690 
3691         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3692                                   dir_ino, &index);
3693         if (ret) {
3694                 btrfs_info(root->fs_info,
3695                         "failed to delete reference to %.*s, inode %llu parent %llu",
3696                         name_len, name, ino, dir_ino);
3697                 btrfs_abort_transaction(trans, root, ret);
3698                 goto err;
3699         }
3700 skip_backref:
3701         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3702         if (ret) {
3703                 btrfs_abort_transaction(trans, root, ret);
3704                 goto err;
3705         }
3706 
3707         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3708                                          inode, dir_ino);
3709         if (ret != 0 && ret != -ENOENT) {
3710                 btrfs_abort_transaction(trans, root, ret);
3711                 goto err;
3712         }
3713 
3714         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3715                                            dir, index);
3716         if (ret == -ENOENT)
3717                 ret = 0;
3718         else if (ret)
3719                 btrfs_abort_transaction(trans, root, ret);
3720 err:
3721         btrfs_free_path(path);
3722         if (ret)
3723                 goto out;
3724 
3725         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3726         inode_inc_iversion(inode);
3727         inode_inc_iversion(dir);
3728         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3729         ret = btrfs_update_inode(trans, root, dir);
3730 out:
3731         return ret;
3732 }
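
/*
 * Illustrative arithmetic for the i_size update above: a btrfs
 * directory's i_size is the sum of its entries' name lengths counted
 * twice, because each name is stored both as a DIR_ITEM (hash lookup)
 * and a DIR_INDEX (readdir order) item.  Unlinking "foo" (name_len == 3)
 * therefore shrinks dir->i_size by 6.
 */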
3733 
3734 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3735                        struct btrfs_root *root,
3736                        struct inode *dir, struct inode *inode,
3737                        const char *name, int name_len)
3738 {
3739         int ret;
3740         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3741         if (!ret) {
3742                 drop_nlink(inode);
3743                 ret = btrfs_update_inode(trans, root, inode);
3744         }
3745         return ret;
3746 }
3747 
3748 /*
3749  * helper to start transaction for unlink and rmdir.
3750  *
3751  * unlink and rmdir are special in btrfs: they do not always free space, so
3752  * if we cannot make our reservation the normal way, see whether there is
3753  * enough slack room in the global reserve to migrate from; otherwise we
3754  * cannot allow the unlink to occur.
3755  */
3756 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
3757 {
3758         struct btrfs_trans_handle *trans;
3759         struct btrfs_root *root = BTRFS_I(dir)->root;
3760         int ret;
3761 
3762         /*
3763          * 1 for the possible orphan item
3764          * 1 for the dir item
3765          * 1 for the dir index
3766          * 1 for the inode ref
3767          * 1 for the inode
3768          */
3769         trans = btrfs_start_transaction(root, 5);
3770         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3771                 return trans;
3772 
3773         if (PTR_ERR(trans) == -ENOSPC) {
3774                 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3775 
3776                 trans = btrfs_start_transaction(root, 0);
3777                 if (IS_ERR(trans))
3778                         return trans;
3779                 ret = btrfs_cond_migrate_bytes(root->fs_info,
3780                                                &root->fs_info->trans_block_rsv,
3781                                                num_bytes, 5);
3782                 if (ret) {
3783                         btrfs_end_transaction(trans, root);
3784                         return ERR_PTR(ret);
3785                 }
3786                 trans->block_rsv = &root->fs_info->trans_block_rsv;
3787                 trans->bytes_reserved = num_bytes;
3788         }
3789         return trans;
3790 }
3791 
3792 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3793 {
3794         struct btrfs_root *root = BTRFS_I(dir)->root;
3795         struct btrfs_trans_handle *trans;
3796         struct inode *inode = dentry->d_inode;
3797         int ret;
3798 
3799         trans = __unlink_start_trans(dir);
3800         if (IS_ERR(trans))
3801                 return PTR_ERR(trans);
3802 
3803         btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3804 
3805         ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3806                                  dentry->d_name.name, dentry->d_name.len);
3807         if (ret)
3808                 goto out;
3809 
3810         if (inode->i_nlink == 0) {
3811                 ret = btrfs_orphan_add(trans, inode);
3812                 if (ret)
3813                         goto out;
3814         }
3815 
3816 out:
3817         btrfs_end_transaction(trans, root);
3818         btrfs_btree_balance_dirty(root);
3819         return ret;
3820 }
3821 
3822 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3823                         struct btrfs_root *root,
3824                         struct inode *dir, u64 objectid,
3825                         const char *name, int name_len)
3826 {
3827         struct btrfs_path *path;
3828         struct extent_buffer *leaf;
3829         struct btrfs_dir_item *di;
3830         struct btrfs_key key;
3831         u64 index;
3832         int ret;
3833         u64 dir_ino = btrfs_ino(dir);
3834 
3835         path = btrfs_alloc_path();
3836         if (!path)
3837                 return -ENOMEM;
3838 
3839         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3840                                    name, name_len, -1);
3841         if (IS_ERR_OR_NULL(di)) {
3842                 if (!di)
3843                         ret = -ENOENT;
3844                 else
3845                         ret = PTR_ERR(di);
3846                 goto out;
3847         }
3848 
3849         leaf = path->nodes[0];
3850         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3851         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3852         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3853         if (ret) {
3854                 btrfs_abort_transaction(trans, root, ret);
3855                 goto out;
3856         }
3857         btrfs_release_path(path);
3858 
3859         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3860                                  objectid, root->root_key.objectid,
3861                                  dir_ino, &index, name, name_len);
3862         if (ret < 0) {
3863                 if (ret != -ENOENT) {
3864                         btrfs_abort_transaction(trans, root, ret);
3865                         goto out;
3866                 }
3867                 di = btrfs_search_dir_index_item(root, path, dir_ino,
3868                                                  name, name_len);
3869                 if (IS_ERR_OR_NULL(di)) {
3870                         if (!di)
3871                                 ret = -ENOENT;
3872                         else
3873                                 ret = PTR_ERR(di);
3874                         btrfs_abort_transaction(trans, root, ret);
3875                         goto out;
3876                 }
3877 
3878                 leaf = path->nodes[0];
3879                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3880                 btrfs_release_path(path);
3881                 index = key.offset;
3882         }
3883         btrfs_release_path(path);
3884 
3885         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3886         if (ret) {
3887                 btrfs_abort_transaction(trans, root, ret);
3888                 goto out;
3889         }
3890 
3891         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3892         inode_inc_iversion(dir);
3893         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3894         ret = btrfs_update_inode_fallback(trans, root, dir);
3895         if (ret)
3896                 btrfs_abort_transaction(trans, root, ret);
3897 out:
3898         btrfs_free_path(path);
3899         return ret;
3900 }
3901 
3902 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3903 {
3904         struct inode *inode = dentry->d_inode;
3905         int err = 0;
3906         struct btrfs_root *root = BTRFS_I(dir)->root;
3907         struct btrfs_trans_handle *trans;
3908 
3909         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3910                 return -ENOTEMPTY;
3911         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3912                 return -EPERM;
3913 
3914         trans = __unlink_start_trans(dir);
3915         if (IS_ERR(trans))
3916                 return PTR_ERR(trans);
3917 
3918         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3919                 err = btrfs_unlink_subvol(trans, root, dir,
3920                                           BTRFS_I(inode)->location.objectid,
3921                                           dentry->d_name.name,
3922                                           dentry->d_name.len);
3923                 goto out;
3924         }
3925 
3926         err = btrfs_orphan_add(trans, inode);
3927         if (err)
3928                 goto out;
3929 
3930         /* now the directory is empty */
3931         err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3932                                  dentry->d_name.name, dentry->d_name.len);
3933         if (!err)
3934                 btrfs_i_size_write(inode, 0);
3935 out:
3936         btrfs_end_transaction(trans, root);
3937         btrfs_btree_balance_dirty(root);
3938 
3939         return err;
3940 }
3941 
3942 /*
3943  * this can truncate away extent items, csum items and directory items.
3944  * It starts at a high offset and removes keys until it can't find
3945  * any higher than new_size.
3946  *
3947  * csum items that cross the new i_size are truncated to the new size
3948  * as well.
3949  *
3950  * min_type is the minimum key type to truncate down to.  If set to 0, this
3951  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3952  */
3953 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3954                                struct btrfs_root *root,
3955                                struct inode *inode,
3956                                u64 new_size, u32 min_type)
3957 {
3958         struct btrfs_path *path;
3959         struct extent_buffer *leaf;
3960         struct btrfs_file_extent_item *fi;
3961         struct btrfs_key key;
3962         struct btrfs_key found_key;
3963         u64 extent_start = 0;
3964         u64 extent_num_bytes = 0;
3965         u64 extent_offset = 0;
3966         u64 item_end = 0;
3967         u64 last_size = (u64)-1;
3968         u32 found_type = (u8)-1;
3969         int found_extent;
3970         int del_item;
3971         int pending_del_nr = 0;
3972         int pending_del_slot = 0;
3973         int extent_type = -1;
3974         int ret;
3975         int err = 0;
3976         u64 ino = btrfs_ino(inode);
3977 
3978         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3979 
3980         path = btrfs_alloc_path();
3981         if (!path)
3982                 return -ENOMEM;
3983         path->reada = -1;
3984 
3985         /*
3986          * We want to drop from the next block forward in case this new size is
3987          * not block aligned since we will be keeping the last block of the
3988          * extent just the way it is.
3989          */
3990         if (root->ref_cows || root == root->fs_info->tree_root)
3991                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
3992                                         root->sectorsize), (u64)-1, 0);
3993 
3994         /*
3995          * This function is also used to drop the items in the log tree before
3996          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3997          * it is used to drop the logged items. So we shouldn't kill the delayed
3998          * items.
3999          */
4000         if (min_type == 0 && root == BTRFS_I(inode)->root)
4001                 btrfs_kill_delayed_inode_items(inode);
4002 
4003         key.objectid = ino;
4004         key.offset = (u64)-1;
4005         key.type = (u8)-1;
4006 
4007 search_again:
4008         path->leave_spinning = 1;
4009         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4010         if (ret < 0) {
4011                 err = ret;
4012                 goto out;
4013         }
4014 
4015         if (ret > 0) {
4016                 /* there are no items in the tree for us to truncate, we're
4017                  * done
4018                  */
4019                 if (path->slots[0] == 0)
4020                         goto out;
4021                 path->slots[0]--;
4022         }
4023 
4024         while (1) {
4025                 fi = NULL;
4026                 leaf = path->nodes[0];
4027                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4028                 found_type = btrfs_key_type(&found_key);
4029 
4030                 if (found_key.objectid != ino)
4031                         break;
4032 
4033                 if (found_type < min_type)
4034                         break;
4035 
4036                 item_end = found_key.offset;
4037                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4038                         fi = btrfs_item_ptr(leaf, path->slots[0],
4039                                             struct btrfs_file_extent_item);
4040                         extent_type = btrfs_file_extent_type(leaf, fi);
4041                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4042                                 item_end +=
4043                                     btrfs_file_extent_num_bytes(leaf, fi);
4044                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4045                                 item_end += btrfs_file_extent_inline_len(leaf,
4046                                                          path->slots[0], fi);
4047                         }
4048                         item_end--;
4049                 }
4050                 if (found_type > min_type) {
4051                         del_item = 1;
4052                 } else {
4053                         if (item_end < new_size)
4054                                 break;
4055                         if (found_key.offset >= new_size)
4056                                 del_item = 1;
4057                         else
4058                                 del_item = 0;
4059                 }
4060                 found_extent = 0;
4061                 /* FIXME, shrink the extent if the ref count is only 1 */
4062                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4063                         goto delete;
4064 
4065                 if (del_item)
4066                         last_size = found_key.offset;
4067                 else
4068                         last_size = new_size;
4069 
4070                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4071                         u64 num_dec;
4072                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4073                         if (!del_item) {
4074                                 u64 orig_num_bytes =
4075                                         btrfs_file_extent_num_bytes(leaf, fi);
4076                                 extent_num_bytes = ALIGN(new_size -
4077                                                 found_key.offset,
4078                                                 root->sectorsize);
4079                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4080                                                          extent_num_bytes);
4081                                 num_dec = (orig_num_bytes -
4082                                            extent_num_bytes);
4083                                 if (root->ref_cows && extent_start != 0)
4084                                         inode_sub_bytes(inode, num_dec);
4085                                 btrfs_mark_buffer_dirty(leaf);
4086                         } else {
4087                                 extent_num_bytes =
4088                                         btrfs_file_extent_disk_num_bytes(leaf,
4089                                                                          fi);
4090                                 extent_offset = found_key.offset -
4091                                         btrfs_file_extent_offset(leaf, fi);
4092 
4093                                 /* FIXME blocksize != 4096 */
4094                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4095                                 if (extent_start != 0) {
4096                                         found_extent = 1;
4097                                         if (root->ref_cows)
4098                                                 inode_sub_bytes(inode, num_dec);
4099                                 }
4100                         }
4101                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4102                         /*
4103                          * we can't truncate inline items that have had
4104                          * special encodings
4105                          */
4106                         if (!del_item &&
4107                             btrfs_file_extent_compression(leaf, fi) == 0 &&
4108                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4109                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4110                                 u32 size = new_size - found_key.offset;
4111 
4112                                 if (root->ref_cows) {
4113                                         inode_sub_bytes(inode, item_end + 1 -
4114                                                         new_size);
4115                                 }
4116 
4117                                 /*
4118                                  * update the ram bytes to properly reflect
4119                                  * the new size of our item
4120                                  */
4121                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4122                                 size =
4123                                     btrfs_file_extent_calc_inline_size(size);
4124                                 btrfs_truncate_item(root, path, size, 1);
4125                         } else if (root->ref_cows) {
4126                                 inode_sub_bytes(inode, item_end + 1 -
4127                                                 found_key.offset);
4128                         }
4129                 }
4130 delete:
4131                 if (del_item) {
4132                         if (!pending_del_nr) {
4133                                 /* no pending yet, add ourselves */
4134                                 pending_del_slot = path->slots[0];
4135                                 pending_del_nr = 1;
4136                         } else if (pending_del_nr &&
4137                                    path->slots[0] + 1 == pending_del_slot) {
4138                                 /* hop on the pending chunk */
4139                                 pending_del_nr++;
4140                                 pending_del_slot = path->slots[0];
4141                         } else {
4142                                 BUG();
4143                         }
4144                 } else {
4145                         break;
4146                 }
4147                 if (found_extent && (root->ref_cows ||
4148                                      root == root->fs_info->tree_root)) {
4149                         btrfs_set_path_blocking(path);
4150                         ret = btrfs_free_extent(trans, root, extent_start,
4151                                                 extent_num_bytes, 0,
4152                                                 btrfs_header_owner(leaf),
4153                                                 ino, extent_offset, 0);
4154                         BUG_ON(ret);
4155                 }
4156 
4157                 if (found_type == BTRFS_INODE_ITEM_KEY)
4158                         break;
4159 
4160                 if (path->slots[0] == 0 ||
4161                     path->slots[0] != pending_del_slot) {
4162                         if (pending_del_nr) {
4163                                 ret = btrfs_del_items(trans, root, path,
4164                                                 pending_del_slot,
4165                                                 pending_del_nr);
4166                                 if (ret) {
4167                                         btrfs_abort_transaction(trans,
4168                                                                 root, ret);
4169                                         goto error;
4170                                 }
4171                                 pending_del_nr = 0;
4172                         }
4173                         btrfs_release_path(path);
4174                         goto search_again;
4175                 } else {
4176                         path->slots[0]--;
4177                 }
4178         }
4179 out:
4180         if (pending_del_nr) {
4181                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4182                                       pending_del_nr);
4183                 if (ret)
4184                         btrfs_abort_transaction(trans, root, ret);
4185         }
4186 error:
4187         if (last_size != (u64)-1)
4188                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4189         btrfs_free_path(path);
4190         return err;
4191 }
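
/*
 * Illustrative sketch (not part of the original inode.c): the delete loop
 * above walks leaf slots downward and coalesces adjacent doomed slots via
 * pending_del_slot/pending_del_nr, so each contiguous run costs a single
 * btrfs_del_items() call.  A minimal userspace analogue of that batching,
 * with hypothetical names (should_delete(), del_items()), looks like this;
 * deleting slots above the cursor never disturbs the slots still to be
 * visited, which is what lets the flush be deferred.
 */
#include <stdio.h>
#include <string.h>

static int should_delete(int v)
{
        return (v % 2) == 0;            /* stand-in for the del_item decision */
}

static void del_items(int *a, int *n, int slot, int nr)
{
        /* one batched delete: drop nr entries starting at slot */
        memmove(a + slot, a + slot + nr, (*n - slot - nr) * sizeof(*a));
        *n -= nr;
}

int main(void)
{
        int a[] = { 1, 2, 4, 6, 7, 8, 10 };
        int n = 7, pending_slot = 0, pending_nr = 0, slot;

        for (slot = n - 1; slot >= 0; slot--) {
                if (!should_delete(a[slot])) {
                        if (pending_nr)         /* flush the run we built */
                                del_items(a, &n, pending_slot, pending_nr);
                        pending_nr = 0;
                        continue;
                }
                if (!pending_nr || slot + 1 == pending_slot) {
                        pending_nr++;           /* extend the run downward */
                } else {
                        del_items(a, &n, pending_slot, pending_nr);
                        pending_nr = 1;
                }
                pending_slot = slot;
        }
        if (pending_nr)
                del_items(a, &n, pending_slot, pending_nr);
        for (slot = 0; slot < n; slot++)
                printf("%d ", a[slot]);         /* prints: 1 7 */
        printf("\n");
        return 0;
}
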
4192 
4193 /*
4194  * btrfs_truncate_page - read, zero a chunk and write a page
4195  * @inode - inode that we're zeroing
4196  * @from - the offset to start zeroing
4197  * @len - the length to zero, 0 to zero the entire range relative to the
4198  *      offset
4199  * @front - zero up to the offset instead of from the offset on
4200  *
4201  * This will find the page for the "from" offset, cow the page and zero the
4202  * part we want to zero.  This is used with truncate and hole punching.
4203  */
4204 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4205                         int front)
4206 {
4207         struct address_space *mapping = inode->i_mapping;
4208         struct btrfs_root *root = BTRFS_I(inode)->root;
4209         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4210         struct btrfs_ordered_extent *ordered;
4211         struct extent_state *cached_state = NULL;
4212         char *kaddr;
4213         u32 blocksize = root->sectorsize;
4214         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4215         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4216         struct page *page;
4217         gfp_t mask = btrfs_alloc_write_mask(mapping);
4218         int ret = 0;
4219         u64 page_start;
4220         u64 page_end;
4221 
4222         if ((offset & (blocksize - 1)) == 0 &&
4223             (!len || ((len & (blocksize - 1)) == 0)))
4224                 goto out;
4225         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4226         if (ret)
4227                 goto out;
4228 
4229 again:
4230         page = find_or_create_page(mapping, index, mask);
4231         if (!page) {
4232                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4233                 ret = -ENOMEM;
4234                 goto out;
4235         }
4236 
4237         page_start = page_offset(page);
4238         page_end = page_start + PAGE_CACHE_SIZE - 1;
4239 
4240         if (!PageUptodate(page)) {
4241                 ret = btrfs_readpage(NULL, page);
4242                 lock_page(page);
4243                 if (page->mapping != mapping) {
4244                         unlock_page(page);
4245                         page_cache_release(page);
4246                         goto again;
4247                 }
4248                 if (!PageUptodate(page)) {
4249                         ret = -EIO;
4250                         goto out_unlock;
4251                 }
4252         }
4253         wait_on_page_writeback(page);
4254 
4255         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4256         set_page_extent_mapped(page);
4257 
4258         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4259         if (ordered) {
4260                 unlock_extent_cached(io_tree, page_start, page_end,
4261                                      &cached_state, GFP_NOFS);
4262                 unlock_page(page);
4263                 page_cache_release(page);
4264                 btrfs_start_ordered_extent(inode, ordered, 1);
4265                 btrfs_put_ordered_extent(ordered);
4266                 goto again;
4267         }
4268 
4269         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4270                           EXTENT_DIRTY | EXTENT_DELALLOC |
4271                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4272                           0, 0, &cached_state, GFP_NOFS);
4273 
4274         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4275                                         &cached_state);
4276         if (ret) {
4277                 unlock_extent_cached(io_tree, page_start, page_end,
4278                                      &cached_state, GFP_NOFS);
4279                 goto out_unlock;
4280         }
4281 
4282         if (offset != PAGE_CACHE_SIZE) {
4283                 if (!len)
4284                         len = PAGE_CACHE_SIZE - offset;
4285                 kaddr = kmap(page);
4286                 if (front)
4287                         memset(kaddr, 0, offset);
4288                 else
4289                         memset(kaddr + offset, 0, len);
4290                 flush_dcache_page(page);
4291                 kunmap(page);
4292         }
4293         ClearPageChecked(page);
4294         set_page_dirty(page);
4295         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4296                              GFP_NOFS);
4297 
4298 out_unlock:
4299         if (ret)
4300                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4301         unlock_page(page);
4302         page_cache_release(page);
4303 out:
4304         return ret;
4305 }
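
/*
 * Illustrative sketch (not part of the original inode.c): the zeroing above
 * reduces to two memset() shapes selected by 'front' -- [0, offset) when
 * zeroing up to the offset, [offset, offset + len) when zeroing from it,
 * with len == 0 meaning "to the end of the page".  PAGE_SIZE below stands
 * in for PAGE_CACHE_SIZE.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void zero_partial(char *page, size_t offset, size_t len, int front)
{
        if (!len)
                len = PAGE_SIZE - offset;
        if (front)
                memset(page, 0, offset);        /* zero up to the offset */
        else
                memset(page + offset, 0, len);  /* zero from the offset on */
}

int main(void)
{
        static char page[PAGE_SIZE];

        memset(page, 'x', sizeof(page));
        zero_partial(page, 100, 0, 0);          /* truncate-style tail zero */
        printf("page[99]=%d page[100]=%d\n", page[99], page[100]);
        return 0;
}
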
4306 
4307 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4308                              u64 offset, u64 len)
4309 {
4310         struct btrfs_trans_handle *trans;
4311         int ret;
4312 
4313         /*
4314          * Still need to make sure the inode looks like it's been updated so
4315          * that any holes get logged if we fsync.
4316          */
4317         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4318                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4319                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4320                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4321                 return 0;
4322         }
4323 
4324         /*
4325          * 1 - for the one we're dropping
4326          * 1 - for the one we're adding
4327          * 1 - for updating the inode.
4328          */
4329         trans = btrfs_start_transaction(root, 3);
4330         if (IS_ERR(trans))
4331                 return PTR_ERR(trans);
4332 
4333         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4334         if (ret) {
4335                 btrfs_abort_transaction(trans, root, ret);
4336                 btrfs_end_transaction(trans, root);
4337                 return ret;
4338         }
4339 
4340         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4341                                        0, 0, len, 0, len, 0, 0, 0);
4342         if (ret)
4343                 btrfs_abort_transaction(trans, root, ret);
4344         else
4345                 btrfs_update_inode(trans, root, inode);
4346         btrfs_end_transaction(trans, root);
4347         return ret;
4348 }
4349 
4350 /*
4351  * This function puts in dummy file extents for the area we're creating a hole
4352  * for.  So if we are truncating this file to a larger size we need to insert
4353  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4354  * the range between oldsize and size
4355  */
4356 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4357 {
4358         struct btrfs_root *root = BTRFS_I(inode)->root;
4359         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4360         struct extent_map *em = NULL;
4361         struct extent_state *cached_state = NULL;
4362         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4363         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4364         u64 block_end = ALIGN(size, root->sectorsize);
4365         u64 last_byte;
4366         u64 cur_offset;
4367         u64 hole_size;
4368         int err = 0;
4369 
4370         /*
4371          * If our size started in the middle of a page we need to zero out the
4372          * rest of the page before we expand the i_size, otherwise we could
4373          * expose stale data.
4374          */
4375         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4376         if (err)
4377                 return err;
4378 
4379         if (size <= hole_start)
4380                 return 0;
4381 
4382         while (1) {
4383                 struct btrfs_ordered_extent *ordered;
4384 
4385                 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4386                                  &cached_state);
4387                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4388                                                      block_end - hole_start);
4389                 if (!ordered)
4390                         break;
4391                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4392                                      &cached_state, GFP_NOFS);
4393                 btrfs_start_ordered_extent(inode, ordered, 1);
4394                 btrfs_put_ordered_extent(ordered);
4395         }
4396 
4397         cur_offset = hole_start;
4398         while (1) {
4399                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4400                                 block_end - cur_offset, 0);
4401                 if (IS_ERR(em)) {
4402                         err = PTR_ERR(em);
4403                         em = NULL;
4404                         break;
4405                 }
4406                 last_byte = min(extent_map_end(em), block_end);
4407                 last_byte = ALIGN(last_byte, root->sectorsize);
4408                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4409                         struct extent_map *hole_em;
4410                         hole_size = last_byte - cur_offset;
4411 
4412                         err = maybe_insert_hole(root, inode, cur_offset,
4413                                                 hole_size);
4414                         if (err)
4415                                 break;
4416                         btrfs_drop_extent_cache(inode, cur_offset,
4417                                                 cur_offset + hole_size - 1, 0);
4418                         hole_em = alloc_extent_map();
4419                         if (!hole_em) {
4420                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4421                                         &BTRFS_I(inode)->runtime_flags);
4422                                 goto next;
4423                         }
4424                         hole_em->start = cur_offset;
4425                         hole_em->len = hole_size;
4426                         hole_em->orig_start = cur_offset;
4427 
4428                         hole_em->block_start = EXTENT_MAP_HOLE;
4429                         hole_em->block_len = 0;
4430                         hole_em->orig_block_len = 0;
4431                         hole_em->ram_bytes = hole_size;
4432                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4433                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4434                         hole_em->generation = root->fs_info->generation;
4435 
4436                         while (1) {
4437                                 write_lock(&em_tree->lock);
4438                                 err = add_extent_mapping(em_tree, hole_em, 1);
4439                                 write_unlock(&em_tree->lock);
4440                                 if (err != -EEXIST)
4441                                         break;
4442                                 btrfs_drop_extent_cache(inode, cur_offset,
4443                                                         cur_offset +
4444                                                         hole_size - 1, 0);
4445                         }
4446                         free_extent_map(hole_em);
4447                 }
4448 next:
4449                 free_extent_map(em);
4450                 em = NULL;
4451                 cur_offset = last_byte;
4452                 if (cur_offset >= block_end)
4453                         break;
4454         }
4455         free_extent_map(em);
4456         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4457                              GFP_NOFS);
4458         return err;
4459 }
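
/*
 * Illustrative sketch (not part of the original inode.c): the visible
 * effect of the hole filling above is that a file grown with ftruncate(2)
 * reads back as zeros, and on filesystems that track holes lseek(2) with
 * SEEK_HOLE finds the gap that btrfs_get_extent reports as
 * EXTENT_MAP_HOLE.  The reported offset is typically block aligned, just
 * as hole_start = ALIGN(oldsize, sectorsize) above suggests.
 */
#define _GNU_SOURCE             /* for SEEK_HOLE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char tmpl[] = "/tmp/holeXXXXXX";
        int fd = mkstemp(tmpl);
        off_t hole;

        if (fd < 0 || write(fd, "data", 4) != 4)
                return 1;
        if (ftruncate(fd, 1 << 20))             /* grow well past i_size */
                return 1;
        hole = lseek(fd, 0, SEEK_HOLE);
        printf("first hole at %lld\n", (long long)hole);
        close(fd);
        unlink(tmpl);
        return 0;
}
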
4460 
4461 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4462 {
4463         struct btrfs_root *root = BTRFS_I(inode)->root;
4464         struct btrfs_trans_handle *trans;
4465         loff_t oldsize = i_size_read(inode);
4466         loff_t newsize = attr->ia_size;
4467         int mask = attr->ia_valid;
4468         int ret;
4469 
4470         /*
4471          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4472          * special case where we need to update the times despite not having
4473          * these flags set.  For all other operations the VFS set these flags
4474          * explicitly if it wants a timestamp update.
4475          */
4476         if (newsize != oldsize) {
4477                 inode_inc_iversion(inode);
4478                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4479                         inode->i_ctime = inode->i_mtime =
4480                                 current_fs_time(inode->i_sb);
4481         }
4482 
4483         if (newsize > oldsize) {
4484                 truncate_pagecache(inode, newsize);
4485                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4486                 if (ret)
4487                         return ret;
4488 
4489                 trans = btrfs_start_transaction(root, 1);
4490                 if (IS_ERR(trans))
4491                         return PTR_ERR(trans);
4492 
4493                 i_size_write(inode, newsize);
4494                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4495                 ret = btrfs_update_inode(trans, root, inode);
4496                 btrfs_end_transaction(trans, root);
4497         } else {
4498 
4499                 /*
4500                  * We're truncating a file that used to have good data down to
4501                  * zero. Make sure it gets into the ordered flush list so that
4502                  * any new writes get down to disk quickly.
4503                  */
4504                 if (newsize == 0)
4505                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4506                                 &BTRFS_I(inode)->runtime_flags);
4507 
4508                 /*
4509                  * 1 for the orphan item we're going to add
4510                  * 1 for the orphan item deletion.
4511                  */
4512                 trans = btrfs_start_transaction(root, 2);
4513                 if (IS_ERR(trans))
4514                         return PTR_ERR(trans);
4515 
4516                 /*
4517                  * We need to do this in case we fail at _any_ point during the
4518                  * actual truncate.  Once we do the truncate_setsize we could
4519                  * invalidate pages which forces any outstanding ordered io to
4520                  * be instantly completed which will give us extents that need
4521                  * to be truncated.  If we fail to add the orphan item, we
4522                  * could have leftover extents that were never meant to live,
4523                  * so we need to guarantee from this point on that everything
4524                  * will be consistent.
4525                  */
4526                 ret = btrfs_orphan_add(trans, inode);
4527                 btrfs_end_transaction(trans, root);
4528                 if (ret)
4529                         return ret;
4530 
4531                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
4532                 truncate_setsize(inode, newsize);
4533 
4534                 /* Disable nonlocked read DIO to avoid an endless truncate */
4535                 btrfs_inode_block_unlocked_dio(inode);
4536                 inode_dio_wait(inode);
4537                 btrfs_inode_resume_unlocked_dio(inode);
4538 
4539                 ret = btrfs_truncate(inode);
4540                 if (ret && inode->i_nlink) {
4541                         int err;
4542 
4543                         /*
4544                          * failed to truncate, disk_i_size is only adjusted down
4545                          * as we remove extents, so it should represent the true
4546                          * size of the inode. Reset the in-memory size and
4547                          * delete our orphan entry.
4548                          */
4549                         trans = btrfs_join_transaction(root);
4550                         if (IS_ERR(trans)) {
4551                                 btrfs_orphan_del(NULL, inode);
4552                                 return ret;
4553                         }
4554                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
4555                         err = btrfs_orphan_del(trans, inode);
4556                         if (err)
4557                                 btrfs_abort_transaction(trans, root, err);
4558                         btrfs_end_transaction(trans, root);
4559                 }
4560         }
4561 
4562         return ret;
4563 }
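
/*
 * Illustrative sketch (not part of the original inode.c): the special case
 * documented at the top of btrfs_setsize() means a plain truncate(2) --
 * which arrives with neither ATTR_CTIME nor ATTR_MTIME set -- still
 * refreshes the timestamps whenever the size actually changes, as a quick
 * userspace check shows.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        char tmpl[] = "/tmp/tsXXXXXX";
        int fd = mkstemp(tmpl);
        struct stat before, after;

        if (fd < 0 || fstat(fd, &before))
                return 1;
        sleep(1);                       /* make a timestamp change visible */
        if (ftruncate(fd, 4096) || fstat(fd, &after))
                return 1;
        printf("mtime %ld -> %ld\n",
               (long)before.st_mtime, (long)after.st_mtime);
        close(fd);
        unlink(tmpl);
        return 0;
}
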
4564 
4565 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4566 {
4567         struct inode *inode = dentry->d_inode;
4568         struct btrfs_root *root = BTRFS_I(inode)->root;
4569         int err;
4570 
4571         if (btrfs_root_readonly(root))
4572                 return -EROFS;
4573 
4574         err = inode_change_ok(inode, attr);
4575         if (err)
4576                 return err;
4577 
4578         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4579                 err = btrfs_setsize(inode, attr);
4580                 if (err)
4581                         return err;
4582         }
4583 
4584         if (attr->ia_valid) {
4585                 setattr_copy(inode, attr);
4586                 inode_inc_iversion(inode);
4587                 err = btrfs_dirty_inode(inode);
4588 
4589                 if (!err && attr->ia_valid & ATTR_MODE)
4590                         err = posix_acl_chmod(inode, inode->i_mode);
4591         }
4592 
4593         return err;
4594 }
4595 
4596 /*
4597  * While truncating the inode pages during eviction, we get the VFS calling
4598  * btrfs_invalidatepage() against each page of the inode. This is slow because
4599  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
4600  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
4601  * extent_state structures over and over, wasting lots of time.
4602  *
4603  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
4604  * those expensive operations on a per page basis and do only the ordered io
4605  * finishing, while we release here the extent_map and extent_state structures,
4606  * without the excessive merging and splitting.
4607  */
4608 static void evict_inode_truncate_pages(struct inode *inode)
4609 {
4610         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4611         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
4612         struct rb_node *node;
4613 
4614         ASSERT(inode->i_state & I_FREEING);
4615         truncate_inode_pages(&inode->i_data, 0);
4616 
4617         write_lock(&map_tree->lock);
4618         while (!RB_EMPTY_ROOT(&map_tree->map)) {
4619                 struct extent_map *em;
4620 
4621                 node = rb_first(&map_tree->map);
4622                 em = rb_entry(node, struct extent_map, rb_node);
4623                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
4624                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
4625                 remove_extent_mapping(map_tree, em);
4626                 free_extent_map(em);
4627         }
4628         write_unlock(&map_tree->lock);
4629 
4630         spin_lock(&io_tree->lock);
4631         while (!RB_EMPTY_ROOT(&io_tree->state)) {
4632                 struct extent_state *state;
4633                 struct extent_state *cached_state = NULL;
4634 
4635                 node = rb_first(&io_tree->state);
4636                 state = rb_entry(node, struct extent_state, rb_node);
4637                 atomic_inc(&state->refs);
4638                 spin_unlock(&io_tree->lock);
4639 
4640                 lock_extent_bits(io_tree, state->start, state->end,
4641                                  0, &cached_state);
4642                 clear_extent_bit(io_tree, state->start, state->end,
4643                                  EXTENT_LOCKED | EXTENT_DIRTY |
4644                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
4645                                  EXTENT_DEFRAG, 1, 1,
4646                                  &cached_state, GFP_NOFS);
4647                 free_extent_state(state);
4648 
4649                 spin_lock(&io_tree->lock);
4650         }
4651         spin_unlock(&io_tree->lock);
4652 }
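
/*
 * Illustrative sketch (not part of the original inode.c): the io_tree
 * drain above follows a classic pattern -- under the lock, detach or
 * reference the first entry; drop the lock for work that may block; retake
 * it and start again from the front.  The list below is a hypothetical
 * stand-in (the real loop pins the extent_state with a refcount instead
 * of unlinking it first).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void do_blocking_work(struct node *n)
{
        printf("draining %d\n", n->val);        /* may sleep in real code */
}

static void drain(void)
{
        pthread_mutex_lock(&lock);
        while (head) {
                struct node *n = head;

                head = n->next;                 /* take the first entry */
                pthread_mutex_unlock(&lock);    /* can't block under lock */
                do_blocking_work(n);
                free(n);
                pthread_mutex_lock(&lock);      /* re-check from the front */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }
        drain();
        return 0;
}
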
4653 
4654 void btrfs_evict_inode(struct inode *inode)
4655 {
4656         struct btrfs_trans_handle *trans;
4657         struct btrfs_root *root = BTRFS_I(inode)->root;
4658         struct btrfs_block_rsv *rsv, *global_rsv;
4659         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
4660         int ret;
4661 
4662         trace_btrfs_inode_evict(inode);
4663 
4664         evict_inode_truncate_pages(inode);
4665 
4666         if (inode->i_nlink &&
4667             ((btrfs_root_refs(&root->root_item) != 0 &&
4668               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
4669              btrfs_is_free_space_inode(inode)))
4670                 goto no_delete;
4671 
4672         if (is_bad_inode(inode)) {
4673                 btrfs_orphan_del(NULL, inode);
4674                 goto no_delete;
4675         }
4676         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4677         if (!special_file(inode->i_mode))
4678                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
4679 
4680         if (root->fs_info->log_root_recovering) {
4681                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4682                                  &BTRFS_I(inode)->runtime_flags));
4683                 goto no_delete;
4684         }
4685 
4686         if (inode->i_nlink > 0) {
4687                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
4688                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
4689                 goto no_delete;
4690         }
4691 
4692         ret = btrfs_commit_inode_delayed_inode(inode);
4693         if (ret) {
4694                 btrfs_orphan_del(NULL, inode);
4695                 goto no_delete;
4696         }
4697 
4698         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
4699         if (!rsv) {
4700                 btrfs_orphan_del(NULL, inode);
4701                 goto no_delete;
4702         }
4703         rsv->size = min_size;
4704         rsv->failfast = 1;
4705         global_rsv = &root->fs_info->global_block_rsv;
4706 
4707         btrfs_i_size_write(inode, 0);
4708 
4709         /*
4710          * This is a bit simpler than btrfs_truncate since we've already
4711          * reserved our space for our orphan item in the unlink, so we just
4712          * need to reserve some slack space in case we add bytes and update
4713          * inode item when doing the truncate.
4714          */
4715         while (1) {
4716                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
4717                                              BTRFS_RESERVE_FLUSH_LIMIT);
4718 
4719                 /*
4720                  * Try to steal from the global reserve since we will
4721                  * likely not use this space anyway; we want to try as
4722                  * hard as possible to get this to work.
4723                  */
4724                 if (ret)
4725                         ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
4726 
4727                 if (ret) {
4728                         btrfs_warn(root->fs_info,
4729                                 "Could not get space for a delete, will truncate on mount %d",
4730                                 ret);
4731                         btrfs_orphan_del(NULL, inode);
4732                         btrfs_free_block_rsv(root, rsv);
4733                         goto no_delete;
4734                 }
4735 
4736                 trans = btrfs_join_transaction(root);
4737                 if (IS_ERR(trans)) {
4738                         btrfs_orphan_del(NULL, inode);
4739                         btrfs_free_block_rsv(root, rsv);
4740                         goto no_delete;
4741                 }
4742 
4743                 trans->block_rsv = rsv;
4744 
4745                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4746                 if (ret != -ENOSPC)
4747                         break;
4748 
4749                 trans->block_rsv = &root->fs_info->trans_block_rsv;
4750                 btrfs_end_transaction(trans, root);
4751                 trans = NULL;
4752                 btrfs_btree_balance_dirty(root);
4753         }
4754 
4755         btrfs_free_block_rsv(root, rsv);
4756 
4757         /*
4758          * Errors here aren't a big deal; it just means we leave orphan items
4759          * in the tree.  They will be cleaned up on the next mount.
4760          */
4761         if (ret == 0) {
4762                 trans->block_rsv = root->orphan_block_rsv;
4763                 btrfs_orphan_del(trans, inode);
4764         } else {
4765                 btrfs_orphan_del(NULL, inode);
4766         }
4767 
4768         trans->block_rsv = &root->fs_info->trans_block_rsv;
4769         if (!(root == root->fs_info->tree_root ||
4770               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4771                 btrfs_return_ino(root, btrfs_ino(inode));
4772 
4773         btrfs_end_transaction(trans, root);
4774         btrfs_btree_balance_dirty(root);
4775 no_delete:
4776         btrfs_remove_delayed_node(inode);
4777         clear_inode(inode);
4778         return;
4779 }
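
/*
 * Illustrative sketch (not part of the original inode.c): the eviction
 * loop above is "reserve, fall back to the global pool, attempt, and on
 * ENOSPC flush and retry".  Everything below (the pools, try_truncate(),
 * flush()) is hypothetical stub code showing only the shape of that loop.
 */
#include <errno.h>
#include <stdio.h>

static int pool, global_pool = 2, attempts_left = 2;

static int reserve(int *from, int need)
{
        if (*from < need)
                return -ENOSPC;
        *from -= need;
        return 0;
}

static int try_truncate(void)
{
        return --attempts_left > 0 ? -ENOSPC : 0;
}

static void flush(void)
{
        pool += 1;              /* pretend flushing freed some space */
}

int main(void)
{
        int ret;

        while (1) {
                ret = reserve(&pool, 1);
                if (ret)                /* steal from the global reserve */
                        ret = reserve(&global_pool, 1);
                if (ret) {
                        fprintf(stderr, "no space, leave the orphan item\n");
                        return 1;
                }
                ret = try_truncate();
                if (ret != -ENOSPC)
                        break;
                flush();                /* balance dirty pages, then retry */
        }
        printf("truncate finished: %d\n", ret);
        return 0;
}
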
4780 
4781 /*
4782  * this returns the key found in the dir entry in the location pointer.
4783  * If no dir entries were found, location->objectid is 0.
4784  */
4785 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4786                                struct btrfs_key *location)
4787 {
4788         const char *name = dentry->d_name.name;
4789         int namelen = dentry->d_name.len;
4790         struct btrfs_dir_item *di;
4791         struct btrfs_path *path;
4792         struct btrfs_root *root = BTRFS_I(dir)->root;
4793         int ret = 0;
4794 
4795         path = btrfs_alloc_path();
4796         if (!path)
4797                 return -ENOMEM;
4798 
4799         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4800                                     namelen, 0);
4801         if (IS_ERR(di))
4802                 ret = PTR_ERR(di);
4803 
4804         if (IS_ERR_OR_NULL(di))
4805                 goto out_err;
4806 
4807         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4808 out:
4809         btrfs_free_path(path);
4810         return ret;
4811 out_err:
4812         location->objectid = 0;
4813         goto out;
4814 }
4815 
4816 /*
4817  * when we hit a tree root in a directory, the btrfs part of the inode
4818  * needs to be changed to reflect the root directory of the tree root.  This
4819  * is kind of like crossing a mount point.
4820  */
4821 static int fixup_tree_root_location(struct btrfs_root *root,
4822                                     struct inode *dir,
4823                                     struct dentry *dentry,
4824                                     struct btrfs_key *location,
4825                                     struct btrfs_root **sub_root)
4826 {
4827         struct btrfs_path *path;
4828         struct btrfs_root *new_root;
4829         struct btrfs_root_ref *ref;
4830         struct extent_buffer *leaf;
4831         int ret;
4832         int err = 0;
4833 
4834         path = btrfs_alloc_path();
4835         if (!path) {
4836                 err = -ENOMEM;
4837                 goto out;
4838         }
4839 
4840         err = -ENOENT;
4841         ret = btrfs_find_item(root->fs_info->tree_root, path,
4842                                 BTRFS_I(dir)->root->root_key.objectid,
4843                                 location->objectid, BTRFS_ROOT_REF_KEY, NULL);
4844         if (ret) {
4845                 if (ret < 0)
4846                         err = ret;
4847                 goto out;
4848         }
4849 
4850         leaf = path->nodes[0];
4851         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4852         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4853             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4854                 goto out;
4855 
4856         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4857                                    (unsigned long)(ref + 1),
4858                                    dentry->d_name.len);
4859         if (ret)
4860                 goto out;
4861 
4862         btrfs_release_path(path);
4863 
4864         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4865         if (IS_ERR(new_root)) {
4866                 err = PTR_ERR(new_root);
4867                 goto out;
4868         }
4869 
4870         *sub_root = new_root;
4871         location->objectid = btrfs_root_dirid(&new_root->root_item);
4872         location->type = BTRFS_INODE_ITEM_KEY;
4873         location->offset = 0;
4874         err = 0;
4875 out:
4876         btrfs_free_path(path);
4877         return err;
4878 }
4879 
4880 static void inode_tree_add(struct inode *inode)
4881 {
4882         struct btrfs_root *root = BTRFS_I(inode)->root;
4883         struct btrfs_inode *entry;
4884         struct rb_node **p;
4885         struct rb_node *parent;
4886         struct rb_node *new = &BTRFS_I(inode)->rb_node;
4887         u64 ino = btrfs_ino(inode);
4888 
4889         if (inode_unhashed(inode))
4890                 return;
4891         parent = NULL;
4892         spin_lock(&root->inode_lock);
4893         p = &root->inode_tree.rb_node;
4894         while (*p) {
4895                 parent = *p;
4896                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
4897 
4898                 if (ino < btrfs_ino(&entry->vfs_inode))
4899                         p = &parent->rb_left;
4900                 else if (ino > btrfs_ino(&entry->vfs_inode))
4901                         p = &parent->rb_right;
4902                 else {
4903                         WARN_ON(!(entry->vfs_inode.i_state &
4904                                   (I_WILL_FREE | I_FREEING)));
4905                         rb_replace_node(parent, new, &root->inode_tree);
4906                         RB_CLEAR_NODE(parent);
4907                         spin_unlock(&root->inode_lock);
4908                         return;
4909                 }
4910         }
4911         rb_link_node(new, parent, p);
4912         rb_insert_color(new, &root->inode_tree);
4913         spin_unlock(&root->inode_lock);
4914 }
4915 
4916 static void inode_tree_del(struct inode *inode)
4917 {
4918         struct btrfs_root *root = BTRFS_I(inode)->root;
4919         int empty = 0;
4920 
4921         spin_lock(&root->inode_lock);
4922         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4923                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4924                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4925                 empty = RB_EMPTY_ROOT(&root->inode_tree);
4926         }
4927         spin_unlock(&root->inode_lock);
4928 
4929         if (empty && btrfs_root_refs(&root->root_item) == 0) {
4930                 synchronize_srcu(&root->fs_info->subvol_srcu);
4931                 spin_lock(&root->inode_lock);
4932                 empty = RB_EMPTY_ROOT(&root->inode_tree);
4933                 spin_unlock(&root->inode_lock);
4934                 if (empty)
4935                         btrfs_add_dead_root(root);
4936         }
4937 }
4938 
4939 void btrfs_invalidate_inodes(struct btrfs_root *root)
4940 {
4941         struct rb_node *node;
4942         struct rb_node *prev;
4943         struct btrfs_inode *entry;
4944         struct inode *inode;
4945         u64 objectid = 0;
4946 
4947         WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4948 
4949         spin_lock(&root->inode_lock);
4950 again:
4951         node = root->inode_tree.rb_node;
4952         prev = NULL;
4953         while (node) {
4954                 prev = node;
4955                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4956 
4957                 if (objectid < btrfs_ino(&entry->vfs_inode))
4958                         node = node->rb_left;
4959                 else if (objectid > btrfs_ino(&entry->vfs_inode))
4960                         node = node->rb_right;
4961                 else
4962                         break;
4963         }
4964         if (!node) {
4965                 while (prev) {
4966                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
4967                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4968                                 node = prev;
4969                                 break;
4970                         }
4971                         prev = rb_next(prev);
4972                 }
4973         }
4974         while (node) {
4975                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4976                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
4977                 inode = igrab(&entry->vfs_inode);
4978                 if (inode) {
4979                         spin_unlock(&root->inode_lock);
4980                         if (atomic_read(&inode->i_count) > 1)
4981                                 d_prune_aliases(inode);
4982                         /*
4983                          * btrfs_drop_inode will have it removed from
4984                          * the inode cache when its usage count
4985                          * hits zero.
4986                          */
4987                         iput(inode);
4988                         cond_resched();
4989                         spin_lock(&root->inode_lock);
4990                         goto again;
4991                 }
4992 
4993                 if (cond_resched_lock(&root->inode_lock))
4994                         goto again;
4995 
4996                 node = rb_next(node);
4997         }
4998         spin_unlock(&root->inode_lock);
4999 }
5000 
5001 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5002 {
5003         struct btrfs_iget_args *args = p;
5004         inode->i_ino = args->location->objectid;
5005         memcpy(&BTRFS_I(inode)->location, args->location,
5006                sizeof(*args->location));
5007         BTRFS_I(inode)->root = args->root;
5008         return 0;
5009 }
5010 
5011 static int btrfs_find_actor(struct inode *inode, void *opaque)
5012 {
5013         struct btrfs_iget_args *args = opaque;
5014         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5015                 args->root == BTRFS_I(inode)->root;
5016 }
5017 
5018 static struct inode *btrfs_iget_locked(struct super_block *s,
5019                                        struct btrfs_key *location,
5020                                        struct btrfs_root *root)
5021 {
5022         struct inode *inode;
5023         struct btrfs_iget_args args;
5024         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5025 
5026         args.location = location;
5027         args.root = root;
5028 
5029         inode = iget5_locked(s, hashval, btrfs_find_actor,
5030                              btrfs_init_locked_inode,
5031                              (void *)&args);
5032         return inode;
5033 }
5034 
5035 /* Get an inode object given its location and corresponding root.
5036  * Returns in *new whether the inode was read from disk
5037  */
5038 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5039                          struct btrfs_root *root, int *new)
5040 {
5041         struct inode *inode;
5042 
5043         inode = btrfs_iget_locked(s, location, root);
5044         if (!inode)
5045                 return ERR_PTR(-ENOMEM);
5046 
5047         if (inode->i_state & I_NEW) {
5048                 btrfs_read_locked_inode(inode);
5049                 if (!is_bad_inode(inode)) {
5050                         inode_tree_add(inode);
5051                         unlock_new_inode(inode);
5052                         if (new)
5053                                 *new = 1;
5054                 } else {
5055                         unlock_new_inode(inode);
5056                         iput(inode);
5057                         inode = ERR_PTR(-ESTALE);
5058                 }
5059         }
5060 
5061         return inode;
5062 }
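
/*
 * Illustrative sketch (not part of the original inode.c): iget5_locked()
 * is a find-or-create keyed by a hash, with an actor (btrfs_find_actor)
 * to test candidates and an init callback (btrfs_init_locked_inode) run
 * once on a fresh object; the caller then finishes two-phase setup when
 * it sees the object is new, as btrfs_iget() does under I_NEW.  The tiny
 * single-threaded cache below is a hypothetical stand-in.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
        unsigned long key;
        int is_new;                     /* stands in for I_NEW */
};

#define CACHE_SIZE 8
static struct obj *cache[CACHE_SIZE];

static struct obj *get_locked(unsigned long key,
                              int (*test)(struct obj *, void *),
                              int (*init)(struct obj *, void *),
                              void *data)
{
        struct obj *o = cache[key % CACHE_SIZE];

        if (o && test(o, data))
                return o;               /* found: not new */
        o = calloc(1, sizeof(*o));
        if (!o || init(o, data))
                return NULL;
        o->is_new = 1;
        cache[key % CACHE_SIZE] = o;
        return o;
}

static int test_key(struct obj *o, void *data)
{
        return o->key == *(unsigned long *)data;
}

static int init_key(struct obj *o, void *data)
{
        o->key = *(unsigned long *)data;
        return 0;
}

int main(void)
{
        unsigned long key = 257;
        struct obj *o = get_locked(key, test_key, init_key, &key);

        if (o && o->is_new) {
                /* second phase, like btrfs_read_locked_inode() */
                printf("created %lu\n", o->key);
                o->is_new = 0;
        }
        return 0;
}
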
5063 
5064 static struct inode *new_simple_dir(struct super_block *s,
5065                                     struct btrfs_key *key,
5066                                     struct btrfs_root *root)
5067 {
5068         struct inode *inode = new_inode(s);
5069 
5070         if (!inode)
5071                 return ERR_PTR(-ENOMEM);
5072 
5073         BTRFS_I(inode)->root = root;
5074         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5075         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5076 
5077         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5078         inode->i_op = &btrfs_dir_ro_inode_operations;
5079         inode->i_fop = &simple_dir_operations;
5080         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5081         inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5082 
5083         return inode;
5084 }
5085 
5086 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5087 {
5088         struct inode *inode;
5089         struct btrfs_root *root = BTRFS_I(dir)->root;
5090         struct btrfs_root *sub_root = root;
5091         struct btrfs_key location;
5092         int index;
5093         int ret = 0;
5094 
5095         if (dentry->d_name.len > BTRFS_NAME_LEN)
5096                 return ERR_PTR(-ENAMETOOLONG);
5097 
5098         ret = btrfs_inode_by_name(dir, dentry, &location);
5099         if (ret < 0)
5100                 return ERR_PTR(ret);
5101 
5102         if (location.objectid == 0)
5103                 return ERR_PTR(-ENOENT);
5104 
5105         if (location.type == BTRFS_INODE_ITEM_KEY) {
5106                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5107                 return inode;
5108         }
5109 
5110         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5111 
5112         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5113         ret = fixup_tree_root_location(root, dir, dentry,
5114                                        &location, &sub_root);
5115         if (ret < 0) {
5116                 if (ret != -ENOENT)
5117                         inode = ERR_PTR(ret);
5118                 else
5119                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5120         } else {
5121                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5122         }
5123         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5124 
5125         if (!IS_ERR(inode) && root != sub_root) {
5126                 down_read(&root->fs_info->cleanup_work_sem);
5127                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5128                         ret = btrfs_orphan_cleanup(sub_root);
5129                 up_read(&root->fs_info->cleanup_work_sem);
5130                 if (ret) {
5131                         iput(inode);
5132                         inode = ERR_PTR(ret);
5133                 }
5134         }
5135 
5136         return inode;
5137 }
5138 
5139 static int btrfs_dentry_delete(const struct dentry *dentry)
5140 {
5141         struct btrfs_root *root;
5142         struct inode *inode = dentry->d_inode;
5143 
5144         if (!inode && !IS_ROOT(dentry))
5145                 inode = dentry->d_parent->d_inode;
5146 
5147         if (inode) {
5148                 root = BTRFS_I(inode)->root;
5149                 if (btrfs_root_refs(&root->root_item) == 0)
5150                         return 1;
5151 
5152                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5153                         return 1;
5154         }
5155         return 0;
5156 }
5157 
5158 static void btrfs_dentry_release(struct dentry *dentry)
5159 {
5160         if (dentry->d_fsdata)
5161                 kfree(dentry->d_fsdata);
5162 }
5163 
5164 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5165                                    unsigned int flags)
5166 {
5167         struct inode *inode;
5168 
5169         inode = btrfs_lookup_dentry(dir, dentry);
5170         if (IS_ERR(inode)) {
5171                 if (PTR_ERR(inode) == -ENOENT)
5172                         inode = NULL;
5173                 else
5174                         return ERR_CAST(inode);
5175         }
5176 
5177         return d_materialise_unique(dentry, inode);
5178 }
5179 
5180 unsigned char btrfs_filetype_table[] = {
5181         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5182 };
5183 
5184 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5185 {
5186         struct inode *inode = file_inode(file);
5187         struct btrfs_root *root = BTRFS_I(inode)->root;
5188         struct btrfs_item *item;
5189         struct btrfs_dir_item *di;
5190         struct btrfs_key key;
5191         struct btrfs_key found_key;
5192         struct btrfs_path *path;
5193         struct list_head ins_list;
5194         struct list_head del_list;
5195         int ret;
5196         struct extent_buffer *leaf;
5197         int slot;
5198         unsigned char d_type;
5199         int over = 0;
5200         u32 di_cur;
5201         u32 di_total;
5202         u32 di_len;
5203         int key_type = BTRFS_DIR_INDEX_KEY;
5204         char tmp_name[32];
5205         char *name_ptr;
5206         int name_len;
5207         int is_curr = 0;        /* ctx->pos points to the current index? */
5208         bool emitted;
5209 
5210         /* FIXME, use a real flag for deciding about the key type */
5211         if (root->fs_info->tree_root == root)
5212                 key_type = BTRFS_DIR_ITEM_KEY;
5213 
5214         if (!dir_emit_dots(file, ctx))
5215                 return 0;
5216 
5217         path = btrfs_alloc_path();
5218         if (!path)
5219                 return -ENOMEM;
5220 
5221         path->reada = 1;
5222 
5223         if (key_type == BTRFS_DIR_INDEX_KEY) {
5224                 INIT_LIST_HEAD(&ins_list);
5225                 INIT_LIST_HEAD(&del_list);
5226                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5227         }
5228 
5229         btrfs_set_key_type(&key, key_type);
5230         key.offset = ctx->pos;
5231         key.objectid = btrfs_ino(inode);
5232 
5233         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5234         if (ret < 0)
5235                 goto err;
5236 
5237         emitted = false;
5238         while (1) {
5239                 leaf = path->nodes[0];
5240                 slot = path->slots[0];
5241                 if (slot >= btrfs_header_nritems(leaf)) {
5242                         ret = btrfs_next_leaf(root, path);
5243                         if (ret < 0)
5244                                 goto err;
5245                         else if (ret > 0)
5246                                 break;
5247                         continue;
5248                 }
5249 
5250                 item = btrfs_item_nr(slot);
5251                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5252 
5253                 if (found_key.objectid != key.objectid)
5254                         break;
5255                 if (btrfs_key_type(&found_key) != key_type)
5256                         break;
5257                 if (found_key.offset < ctx->pos)
5258                         goto next;
5259                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5260                     btrfs_should_delete_dir_index(&del_list,
5261                                                   found_key.offset))
5262                         goto next;
5263 
5264                 ctx->pos = found_key.offset;
5265                 is_curr = 1;
5266 
5267                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5268                 di_cur = 0;
5269                 di_total = btrfs_item_size(leaf, item);
5270 
5271                 while (di_cur < di_total) {
5272                         struct btrfs_key location;
5273 
5274                         if (verify_dir_item(root, leaf, di))
5275                                 break;
5276 
5277                         name_len = btrfs_dir_name_len(leaf, di);
5278                         if (name_len <= sizeof(tmp_name)) {
5279                                 name_ptr = tmp_name;
5280                         } else {
5281                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5282                                 if (!name_ptr) {
5283                                         ret = -ENOMEM;
5284                                         goto err;
5285                                 }
5286                         }
5287                         read_extent_buffer(leaf, name_ptr,
5288                                            (unsigned long)(di + 1), name_len);
5289 
5290                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5291                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5292 
5293 
5294                         /* is this a reference to our own snapshot? If so
5295                          * skip it.
5296                          *
5297                          * In contrast to old kernels, we insert the snapshot's
5298                          * dir item and dir index after it has been created, so
5299                          * we won't find a reference to our own snapshot. We
5300                          * still keep the following code for backward
5301                          * compatibility.
5302                          */
5303                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5304                             location.objectid == root->root_key.objectid) {
5305                                 over = 0;
5306                                 goto skip;
5307                         }
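                             /* dir_emit() returns false once the user's
                              * buffer is full, which ends this readdir. */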
5308                         over = !dir_emit(ctx, name_ptr, name_len,
5309                                        location.objectid, d_type);
5310 
5311 skip:
5312                         if (name_ptr != tmp_name)
5313                                 kfree(name_ptr);
5314 
5315                         if (over)
5316                                 goto nopos;
5317                         emitted = true;
5318                         di_len = btrfs_dir_name_len(leaf, di) +
5319                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5320                         di_cur += di_len;
5321                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5322                 }
5323 next:
5324                 path->slots[0]++;
5325         }
5326 
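             /*
              * Emit the entries that exist only in the delayed list and have
              * not been written into the btree yet.
              */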
5327         if (key_type == BTRFS_DIR_INDEX_KEY) {
5328                 if (is_curr)
5329                         ctx->pos++;
5330                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
5331                 if (ret)
5332                         goto nopos;
5333         }
5334 
5335         /*
5336          * If we haven't emitted any dir entry, we must not touch ctx->pos as
5337          * it was set to the termination value in a previous call. We assume
5338          * that "." and ".." were emitted if we reach this point, and set the
5339          * termination value as well for an empty directory.
5340          */
5341         if (ctx->pos > 2 && !emitted)
5342                 goto nopos;
5343 
5344         /* Reached end of directory/root. Bump pos past the last item. */
5345         ctx->pos++;
5346 
5347         /*
5348          * Stop new entries from being returned after we return the last
5349          * entry.
5350          *
5351          * New directory entries are assigned a strictly increasing
5352          * offset.  This means that new entries created during readdir
5353          * are *guaranteed* to be seen in the future by that readdir.
5354          * This has broken buggy programs which operate on names as
5355          * they're returned by readdir.  Until we re-use freed offsets
5356          * we have this hack to stop new entries from being returned
5357          * under the assumption that they'll never reach this huge
5358          * offset.
5359          *
5360          * This is being careful not to overflow 32bit loff_t unless the
5361          * last entry requires it because doing so has broken 32bit apps
5362          * in the past.
5363          */
5364         if (key_type == BTRFS_DIR_INDEX_KEY) {
5365                 if (ctx->pos >= INT_MAX)
5366                         ctx->pos = LLONG_MAX;
5367                 else
5368                         ctx->pos = INT_MAX;
5369         }
5370 nopos:
5371         ret = 0;
5372 err:
5373         if (key_type == BTRFS_DIR_INDEX_KEY)
5374                 btrfs_put_delayed_items(&ins_list, &del_list);
5375         btrfs_free_path(path);
5376         return ret;
5377 }
5378 
5379 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5380 {
5381         struct btrfs_root *root = BTRFS_I(inode)->root;
5382         struct btrfs_trans_handle *trans;
5383         int ret = 0;
5384         bool nolock = false;
5385 
5386         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5387                 return 0;
5388 
5389         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5390                 nolock = true;
5391 
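             /*
              * Inode updates go through the (delayed) transaction machinery,
              * so a data-integrity sync has to commit the running
              * transaction; plain background writeback can skip the inode.
              */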
5392         if (wbc->sync_mode == WB_SYNC_ALL) {
5393                 if (nolock)
5394                         trans = btrfs_join_transaction_nolock(root);
5395                 else
5396                         trans = btrfs_join_transaction(root);
5397                 if (IS_ERR(trans))
5398                         return PTR_ERR(trans);
5399                 ret = btrfs_commit_transaction(trans, root);
5400         }
5401         return ret;
5402 }
5403 
5404 /*
5405  * This is somewhat expensive, updating the tree every time the
5406  * inode changes.  But it is most likely to find the inode in cache.
5407  * FIXME, needs more benchmarking... there are no reasons other than performance
5408  * to keep or drop this code.
5409  */
5410 static int btrfs_dirty_inode(struct inode *inode)
5411 {
5412         struct btrfs_root *root = BTRFS_I(inode)->root;
5413         struct btrfs_trans_handle *trans;
5414         int ret;
5415 
5416         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5417                 return 0;
5418 
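             /* Joining piggybacks on a running transaction without reserving
              * metadata space, which is why -ENOSPC is possible below. */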
5419         trans = btrfs_join_transaction(root);
5420         if (IS_ERR(trans))
5421                 return PTR_ERR(trans);
5422 
5423         ret = btrfs_update_inode(trans, root, inode);
5424         if (ret == -ENOSPC) {
5425                 /* whoops, let's try again with a full transaction */
5426                 btrfs_end_transaction(trans, root);
5427                 trans = btrfs_start_transaction(root, 1);
5428                 if (IS_ERR(trans))
5429                         return PTR_ERR(trans);
5430 
5431                 ret = btrfs_update_inode(trans, root, inode);
5432         }
5433         btrfs_end_transaction(trans, root);
5434         if (BTRFS_I(inode)->delayed_node)
5435                 btrfs_balance_delayed_items(root);
5436 
5437         return ret;
5438 }
5439 
5440 /*
5441  * This is a copy of file_update_time.  We need this so we can return error on
5442  * ENOSPC for updating the inode in the case of file write and mmap writes.
5443  */
5444 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5445                              int flags)
5446 {
5447         struct btrfs_root *root = BTRFS_I(inode)->root;
5448 
5449         if (btrfs_root_readonly(root))
5450                 return -EROFS;
5451 
5452         if (flags & S_VERSION)
5453                 inode_inc_iversion(inode);
5454         if (flags & S_CTIME)
5455                 inode->i_ctime = *now;
5456         if (flags & S_MTIME)
5457                 inode->i_mtime = *now;
5458         if (flags & S_ATIME)
5459                 inode->i_atime = *now;
5460         return btrfs_dirty_inode(inode);
5461 }
5462 
5463 /*
5464  * find the highest existing sequence number in a directory
5465  * and then set the in-memory index_cnt variable to the first
5466  * free sequence number
5467  */
5468 static int btrfs_set_inode_index_count(struct inode *inode)
5469 {
5470         struct btrfs_root *root = BTRFS_I(inode)->root;
5471         struct btrfs_key key, found_key;
5472         struct btrfs_path *path;
5473         struct extent_buffer *leaf;
5474         int ret;
5475 
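             /*
              * Search for the largest possible index offset so the search
              * lands just past the highest DIR_INDEX item that exists.
              */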
5476         key.objectid = btrfs_ino(inode);
5477         btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5478         key.offset = (u64)-1;
5479 
5480         path = btrfs_alloc_path();
5481         if (!path)
5482                 return -ENOMEM;
5483 
5484         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5485         if (ret < 0)
5486                 goto out;
5487         /* FIXME: handle ret == 0, i.e. an index item already at (u64)-1 */
5488         if (ret == 0)
5489                 goto out;
5490         ret = 0;
5491 
5492         /*
5493          * MAGIC NUMBER EXPLANATION:
5494          * Since we search a directory based on f_pos, and '.' and '..'
5495          * have f_pos of 0 and 1 respectively, everybody else has to
5496          * start at 2.
5497          */
5498         if (path->slots[0] == 0) {
5499                 BTRFS_I(inode)->index_cnt = 2;
5500                 goto out;
5501         }
5502 
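             /*
              * Offset (u64)-1 never exists, so the search left us one slot
              * past the last item; step back to examine it.
              */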
5503         path->slots[0]--;
5504 
5505         leaf = path->nodes[0];
5506         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5507 
5508         if (found_key.objectid != btrfs_ino(inode) ||
5509             btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5510                 BTRFS_I(inode)->index_cnt = 2;
5511                 goto out;
5512         }
5513 
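             /* The next free index is one past the highest one found. */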
5514         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5515 out:
5516         btrfs_free_path(path);
5517         return ret;
5518 }
5519 
5520 /*
5521  * helper to find a free sequence number in a given directory.  The current
5522  * code is very simple; later versions will do smarter things in the btree
5523  */
5524 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5525 {
5526         int ret = 0;
5527 
5528         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5529                 ret = btrfs_inode_delayed_dir_index_count(