
TOMOYO Linux Cross Reference
Linux/fs/btrfs/disk-io.c


  1 /*
  2  * Copyright (C) 2007 Oracle.  All rights reserved.
  3  *
  4  * This program is free software; you can redistribute it and/or
  5  * modify it under the terms of the GNU General Public
  6  * License v2 as published by the Free Software Foundation.
  7  *
  8  * This program is distributed in the hope that it will be useful,
  9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11  * General Public License for more details.
 12  *
 13  * You should have received a copy of the GNU General Public
 14  * License along with this program; if not, write to the
 15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16  * Boston, MA 021110-1307, USA.
 17  */
 18 
 19 #include <linux/fs.h>
 20 #include <linux/blkdev.h>
 21 #include <linux/scatterlist.h>
 22 #include <linux/swap.h>
 23 #include <linux/radix-tree.h>
 24 #include <linux/writeback.h>
 25 #include <linux/buffer_head.h>
 26 #include <linux/workqueue.h>
 27 #include <linux/kthread.h>
 28 #include <linux/freezer.h>
 29 #include <linux/crc32c.h>
 30 #include <linux/slab.h>
 31 #include <linux/migrate.h>
 32 #include <linux/ratelimit.h>
 33 #include <linux/uuid.h>
 34 #include <asm/unaligned.h>
 35 #include "compat.h"
 36 #include "ctree.h"
 37 #include "disk-io.h"
 38 #include "transaction.h"
 39 #include "btrfs_inode.h"
 40 #include "volumes.h"
 41 #include "print-tree.h"
 42 #include "async-thread.h"
 43 #include "locking.h"
 44 #include "tree-log.h"
 45 #include "free-space-cache.h"
 46 #include "inode-map.h"
 47 #include "check-integrity.h"
 48 #include "rcu-string.h"
 49 #include "dev-replace.h"
 50 #include "raid56.h"
 51 
 52 #ifdef CONFIG_X86
 53 #include <asm/cpufeature.h>
 54 #endif
 55 
 56 static struct extent_io_ops btree_extent_io_ops;
 57 static void end_workqueue_fn(struct btrfs_work *work);
 58 static void free_fs_root(struct btrfs_root *root);
 59 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 60                                     int read_only);
 61 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
 62                                              struct btrfs_root *root);
 63 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 64 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 65                                       struct btrfs_root *root);
 66 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
 67 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
 68 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 69                                         struct extent_io_tree *dirty_pages,
 70                                         int mark);
 71 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 72                                        struct extent_io_tree *pinned_extents);
 73 static int btrfs_cleanup_transaction(struct btrfs_root *root);
 74 static void btrfs_error_commit_super(struct btrfs_root *root);
 75 
 76 /*
 77  * end_io_wq structs are used to do processing in task context when an IO is
 78  * complete.  This is used during reads to verify checksums, and it is used
 79  * by writes to insert metadata for new file extents after IO is complete.
 80  */
 81 struct end_io_wq {
 82         struct bio *bio;
 83         bio_end_io_t *end_io;
 84         void *private;
 85         struct btrfs_fs_info *info;
 86         int error;
 87         int metadata;
 88         struct list_head list;
 89         struct btrfs_work work;
 90 };
 91 
 92 /*
 93  * async submit bios are used to offload expensive checksumming
 94  * onto the worker threads.  They checksum file and metadata bios
 95  * just before they are sent down the IO stack.
 96  */
 97 struct async_submit_bio {
 98         struct inode *inode;
 99         struct bio *bio;
100         struct list_head list;
101         extent_submit_bio_hook_t *submit_bio_start;
102         extent_submit_bio_hook_t *submit_bio_done;
103         int rw;
104         int mirror_num;
105         unsigned long bio_flags;
106         /*
107          * bio_offset is optional; it can be used if the pages in the bio
108          * can't tell us where in the file the bio should go
109          */
110         u64 bio_offset;
111         struct btrfs_work work;
112         int error;
113 };
114 
115 /*
116  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
117  * eb, the lockdep key is determined by the btrfs_root it belongs to and
118  * the level the eb occupies in the tree.
119  *
120  * Different roots are used for different purposes and may nest inside each
121  * other, and they require separate keysets.  As lockdep keys should be
122  * static, assign keysets according to the purpose of the root as indicated
123  * by btrfs_root->objectid.  This ensures that all special purpose roots
124  * have separate keysets.
125  *
126  * Lock-nesting across peer nodes is always done with the immediate parent
127  * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
128  * subclass to avoid triggering lockdep warning in such cases.
129  *
130  * The key is set by the readpage_end_io_hook after the buffer has passed
131  * csum validation but before the pages are unlocked.  It is also set by
132  * btrfs_init_new_buffer on freshly allocated blocks.
133  *
134  * We also add a check to make sure the highest level of the tree is the
135  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
136  * needs update as well.
137  */
138 #ifdef CONFIG_DEBUG_LOCK_ALLOC
139 # if BTRFS_MAX_LEVEL != 8
140 #  error
141 # endif
142 
143 static struct btrfs_lockdep_keyset {
144         u64                     id;             /* root objectid */
145         const char              *name_stem;     /* lock name stem */
146         char                    names[BTRFS_MAX_LEVEL + 1][20];
147         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
148 } btrfs_lockdep_keysets[] = {
149         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
150         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
151         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
152         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
153         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
154         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
155         { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
156         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
157         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
158         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
159         { .id = 0,                              .name_stem = "tree"     },
160 };
161 
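/*
 * The init function below only pre-generates the human-readable lock class
 * names from the stems in btrfs_lockdep_keysets above, e.g. "btrfs-extent-03"
 * for level 3 of the extent tree, so that lockdep reports identify both the
 * tree and the level of the contended buffer.
 */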
162 void __init btrfs_init_lockdep(void)
163 {
164         int i, j;
165 
166         /* initialize lockdep class names */
167         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
168                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
169 
170                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
171                         snprintf(ks->names[j], sizeof(ks->names[j]),
172                                  "btrfs-%s-%02d", ks->name_stem, j);
173         }
174 }
175 
176 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
177                                     int level)
178 {
179         struct btrfs_lockdep_keyset *ks;
180 
181         BUG_ON(level >= ARRAY_SIZE(ks->keys));
182 
183         /* find the matching keyset, id 0 is the default entry */
184         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
185                 if (ks->id == objectid)
186                         break;
187 
188         lockdep_set_class_and_name(&eb->lock,
189                                    &ks->keys[level], ks->names[level]);
190 }
191 
192 #endif
193 
194 /*
195  * extents on the btree inode are pretty simple: there's one extent
196  * that covers the entire device
197  */
198 static struct extent_map *btree_get_extent(struct inode *inode,
199                 struct page *page, size_t pg_offset, u64 start, u64 len,
200                 int create)
201 {
202         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
203         struct extent_map *em;
204         int ret;
205 
206         read_lock(&em_tree->lock);
207         em = lookup_extent_mapping(em_tree, start, len);
208         if (em) {
209                 em->bdev =
210                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
211                 read_unlock(&em_tree->lock);
212                 goto out;
213         }
214         read_unlock(&em_tree->lock);
215 
216         em = alloc_extent_map();
217         if (!em) {
218                 em = ERR_PTR(-ENOMEM);
219                 goto out;
220         }
221         em->start = 0;
222         em->len = (u64)-1;
223         em->block_len = (u64)-1;
224         em->block_start = 0;
225         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
226 
227         write_lock(&em_tree->lock);
228         ret = add_extent_mapping(em_tree, em, 0);
229         if (ret == -EEXIST) {
230                 free_extent_map(em);
231                 em = lookup_extent_mapping(em_tree, start, len);
232                 if (!em)
233                         em = ERR_PTR(-EIO);
234         } else if (ret) {
235                 free_extent_map(em);
236                 em = ERR_PTR(ret);
237         }
238         write_unlock(&em_tree->lock);
239 
240 out:
241         return em;
242 }
243 
244 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
245 {
246         return crc32c(seed, data, len);
247 }
248 
249 void btrfs_csum_final(u32 crc, char *result)
250 {
251         put_unaligned_le32(~crc, result);
252 }
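
/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above are typically combined to checksum a byte range.  The helper name
 * and its parameters here are hypothetical.
 */
static inline void example_csum_range(char *data, size_t len, char result[4])
{
	u32 crc = ~(u32)0;			/* standard crc32c seed */

	crc = btrfs_csum_data(data, crc, len);	/* accumulate over the range */
	btrfs_csum_final(crc, result);		/* store ~crc, little-endian */
}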
253 
254 /*
255  * compute the csum for a btree block, and either verify it or write it
256  * into the csum field of the block.
257  */
258 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
259                            int verify)
260 {
261         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
262         char *result = NULL;
263         unsigned long len;
264         unsigned long cur_len;
265         unsigned long offset = BTRFS_CSUM_SIZE;
266         char *kaddr;
267         unsigned long map_start;
268         unsigned long map_len;
269         int err;
270         u32 crc = ~(u32)0;
271         unsigned long inline_result;
272 
273         len = buf->len - offset;
274         while (len > 0) {
275                 err = map_private_extent_buffer(buf, offset, 32,
276                                         &kaddr, &map_start, &map_len);
277                 if (err)
278                         return 1;
279                 cur_len = min(len, map_len - (offset - map_start));
280                 crc = btrfs_csum_data(kaddr + offset - map_start,
281                                       crc, cur_len);
282                 len -= cur_len;
283                 offset += cur_len;
284         }
285         if (csum_size > sizeof(inline_result)) {
286                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
287                 if (!result)
288                         return 1;
289         } else {
290                 result = (char *)&inline_result;
291         }
292 
293         btrfs_csum_final(crc, result);
294 
295         if (verify) {
296                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
297                         u32 val;
298                         u32 found = 0;
299                         memcpy(&found, result, csum_size);
300 
301                         read_extent_buffer(buf, &val, 0, csum_size);
302                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
303                                        "failed on %llu wanted %X found %X "
304                                        "level %d\n",
305                                        root->fs_info->sb->s_id,
306                                        (unsigned long long)buf->start, val, found,
307                                        btrfs_header_level(buf));
308                         if (result != (char *)&inline_result)
309                                 kfree(result);
310                         return 1;
311                 }
312         } else {
313                 write_extent_buffer(buf, result, 0, csum_size);
314         }
315         if (result != (char *)&inline_result)
316                 kfree(result);
317         return 0;
318 }
319 
320 /*
321  * we can't consider a given block up to date unless the transid of the
322  * block matches the transid in the parent node's pointer.  This is how we
323  * detect blocks that either didn't get written at all or got written
324  * in the wrong place.
325  */
326 static int verify_parent_transid(struct extent_io_tree *io_tree,
327                                  struct extent_buffer *eb, u64 parent_transid,
328                                  int atomic)
329 {
330         struct extent_state *cached_state = NULL;
331         int ret;
332 
333         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334                 return 0;
335 
336         if (atomic)
337                 return -EAGAIN;
338 
339         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
340                          0, &cached_state);
341         if (extent_buffer_uptodate(eb) &&
342             btrfs_header_generation(eb) == parent_transid) {
343                 ret = 0;
344                 goto out;
345         }
346         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
347                        "found %llu\n",
348                        (unsigned long long)eb->start,
349                        (unsigned long long)parent_transid,
350                        (unsigned long long)btrfs_header_generation(eb));
351         ret = 1;
352         clear_extent_buffer_uptodate(eb);
353 out:
354         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
355                              &cached_state, GFP_NOFS);
356         return ret;
357 }
358 
359 /*
360  * Return 0 if the superblock checksum matches the value computed with the
361  * checksum algorithm it declares.  Pass the raw disk superblock data.
362  */
363 static int btrfs_check_super_csum(char *raw_disk_sb)
364 {
365         struct btrfs_super_block *disk_sb =
366                 (struct btrfs_super_block *)raw_disk_sb;
367         u16 csum_type = btrfs_super_csum_type(disk_sb);
368         int ret = 0;
369 
370         if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
371                 u32 crc = ~(u32)0;
372                 const int csum_size = sizeof(crc);
373                 char result[csum_size];
374 
375                 /*
376                  * The super_block structure does not span the whole
377                  * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
378                  * is filled with zeros and is included in the checksum.
379                  */
380                 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
381                                 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
382                 btrfs_csum_final(crc, result);
383 
384                 if (memcmp(raw_disk_sb, result, csum_size))
385                         ret = 1;
386 
387                 if (ret && btrfs_super_generation(disk_sb) < 10) {
388                         printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
389                         ret = 0;
390                 }
391         }
392 
393         if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
394                 printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
395                                 csum_type);
396                 ret = 1;
397         }
398 
399         return ret;
400 }
401 
402 /*
403  * helper to read a given tree block, doing retries as required when
404  * the checksums don't match and we have alternate mirrors to try.
405  */
406 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
407                                           struct extent_buffer *eb,
408                                           u64 start, u64 parent_transid)
409 {
410         struct extent_io_tree *io_tree;
411         int failed = 0;
412         int ret;
413         int num_copies = 0;
414         int mirror_num = 0;
415         int failed_mirror = 0;
416 
417         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
418         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
419         while (1) {
420                 ret = read_extent_buffer_pages(io_tree, eb, start,
421                                                WAIT_COMPLETE,
422                                                btree_get_extent, mirror_num);
423                 if (!ret) {
424                         if (!verify_parent_transid(io_tree, eb,
425                                                    parent_transid, 0))
426                                 break;
427                         else
428                                 ret = -EIO;
429                 }
430 
431                 /*
432                  * This buffer's crc is fine, but its contents are corrupted, so
433          * there is no reason to read the other copies; they won't be
434                  * any less wrong.
435                  */
436                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
437                         break;
438 
439                 num_copies = btrfs_num_copies(root->fs_info,
440                                               eb->start, eb->len);
441                 if (num_copies == 1)
442                         break;
443 
444                 if (!failed_mirror) {
445                         failed = 1;
446                         failed_mirror = eb->read_mirror;
447                 }
448 
449                 mirror_num++;
450                 if (mirror_num == failed_mirror)
451                         mirror_num++;
452 
453                 if (mirror_num > num_copies)
454                         break;
455         }
456 
457         if (failed && !ret && failed_mirror)
458                 repair_eb_io_failure(root, eb, failed_mirror);
459 
460         return ret;
461 }
462 
463 /*
464  * checksum a dirty tree block before IO.  This has extra checks to make sure
465  * we only fill in the checksum field in the first page of a multi-page block
466  */
467 
468 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
469 {
470         struct extent_io_tree *tree;
471         u64 start = page_offset(page);
472         u64 found_start;
473         struct extent_buffer *eb;
474 
475         tree = &BTRFS_I(page->mapping->host)->io_tree;
476 
477         eb = (struct extent_buffer *)page->private;
478         if (page != eb->pages[0])
479                 return 0;
480         found_start = btrfs_header_bytenr(eb);
481         if (found_start != start) {
482                 WARN_ON(1);
483                 return 0;
484         }
485         if (!PageUptodate(page)) {
486                 WARN_ON(1);
487                 return 0;
488         }
489         csum_tree_block(root, eb, 0);
490         return 0;
491 }
492 
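/*
 * A tree block is accepted if its fsid matches either our own fs_devices or
 * any filesystem in the ->seed chain: blocks inherited from a seed device
 * still carry the seed filesystem's fsid.
 */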
493 static int check_tree_block_fsid(struct btrfs_root *root,
494                                  struct extent_buffer *eb)
495 {
496         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
497         u8 fsid[BTRFS_UUID_SIZE];
498         int ret = 1;
499 
500         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
501                            BTRFS_FSID_SIZE);
502         while (fs_devices) {
503                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
504                         ret = 0;
505                         break;
506                 }
507                 fs_devices = fs_devices->seed;
508         }
509         return ret;
510 }
511 
512 #define CORRUPT(reason, eb, root, slot)                         \
513         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
514                "root=%llu, slot=%d\n", reason,                  \
515                (unsigned long long)btrfs_header_bytenr(eb),     \
516                (unsigned long long)root->objectid, slot)
517 
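/*
 * Leaf layout reminder for the checks below: item headers grow from the front
 * of the leaf while item data grows backwards from the end, so for every slot
 * item_end(slot) == item_offset(slot) + item_size(slot), and the data of slot
 * N starts exactly where the data of slot N+1 ends.  For example (hypothetical
 * numbers), with a leaf data size of 16251 bytes, slot 0's data could occupy
 * [16200, 16251) and slot 1's data would then have to end at 16200.
 */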
518 static noinline int check_leaf(struct btrfs_root *root,
519                                struct extent_buffer *leaf)
520 {
521         struct btrfs_key key;
522         struct btrfs_key leaf_key;
523         u32 nritems = btrfs_header_nritems(leaf);
524         int slot;
525 
526         if (nritems == 0)
527                 return 0;
528 
529         /* Check the 0 item */
530         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
531             BTRFS_LEAF_DATA_SIZE(root)) {
532                 CORRUPT("invalid item offset size pair", leaf, root, 0);
533                 return -EIO;
534         }
535 
536         /*
537          * Check to make sure each item's keys are in the correct order and their
538          * offsets make sense.  We only have to loop through nritems-1 because
539          * we check the current slot against the next slot, which verifies the
540          * next slot's offset+size makes sense and that the current slot's
541          * offset is correct.
542          */
543         for (slot = 0; slot < nritems - 1; slot++) {
544                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
545                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
546 
547                 /* Make sure the keys are in the right order */
548                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
549                         CORRUPT("bad key order", leaf, root, slot);
550                         return -EIO;
551                 }
552 
553                 /*
554                  * Make sure the offset and ends are right, remember that the
555                  * item data starts at the end of the leaf and grows towards the
556                  * front.
557                  */
558                 if (btrfs_item_offset_nr(leaf, slot) !=
559                         btrfs_item_end_nr(leaf, slot + 1)) {
560                         CORRUPT("slot offset bad", leaf, root, slot);
561                         return -EIO;
562                 }
563 
564                 /*
565                  * Check to make sure that we don't point outside of the leaf,
566          * just in case all the items are consistent with each other, but
567                  * all point outside of the leaf.
568                  */
569                 if (btrfs_item_end_nr(leaf, slot) >
570                     BTRFS_LEAF_DATA_SIZE(root)) {
571                         CORRUPT("slot end outside of leaf", leaf, root, slot);
572                         return -EIO;
573                 }
574         }
575 
576         return 0;
577 }
578 
579 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
580                                struct extent_state *state, int mirror)
581 {
582         struct extent_io_tree *tree;
583         u64 found_start;
584         int found_level;
585         struct extent_buffer *eb;
586         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
587         int ret = 0;
588         int reads_done;
589 
590         if (!page->private)
591                 goto out;
592 
593         tree = &BTRFS_I(page->mapping->host)->io_tree;
594         eb = (struct extent_buffer *)page->private;
595 
596         /* the pending IO might have been the only thing that kept this buffer
597          * in memory.  Make sure we have a ref for all these other checks
598          */
599         extent_buffer_get(eb);
600 
601         reads_done = atomic_dec_and_test(&eb->io_pages);
602         if (!reads_done)
603                 goto err;
604 
605         eb->read_mirror = mirror;
606         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
607                 ret = -EIO;
608                 goto err;
609         }
610 
611         found_start = btrfs_header_bytenr(eb);
612         if (found_start != eb->start) {
613                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
614                                "%llu %llu\n",
615                                (unsigned long long)found_start,
616                                (unsigned long long)eb->start);
617                 ret = -EIO;
618                 goto err;
619         }
620         if (check_tree_block_fsid(root, eb)) {
621                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
622                                (unsigned long long)eb->start);
623                 ret = -EIO;
624                 goto err;
625         }
626         found_level = btrfs_header_level(eb);
627         if (found_level >= BTRFS_MAX_LEVEL) {
628                 btrfs_info(root->fs_info, "bad tree block level %d\n",
629                            (int)btrfs_header_level(eb));
630                 ret = -EIO;
631                 goto err;
632         }
633 
634         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
635                                        eb, found_level);
636 
637         ret = csum_tree_block(root, eb, 1);
638         if (ret) {
639                 ret = -EIO;
640                 goto err;
641         }
642 
643         /*
644          * If this is a leaf block and it is corrupt, set the corrupt bit so
645          * that we don't try and read the other copies of this block, just
646          * return -EIO.
647          */
648         if (found_level == 0 && check_leaf(root, eb)) {
649                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
650                 ret = -EIO;
651         }
652 
653         if (!ret)
654                 set_extent_buffer_uptodate(eb);
655 err:
656         if (reads_done &&
657             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
658                 btree_readahead_hook(root, eb, eb->start, ret);
659 
660         if (ret) {
661                 /*
662                  * our io error hook is going to dec the io pages
663          * again, so we have to make sure it has something
664                  * to decrement
665                  */
666                 atomic_inc(&eb->io_pages);
667                 clear_extent_buffer_uptodate(eb);
668         }
669         free_extent_buffer(eb);
670 out:
671         return ret;
672 }
673 
674 static int btree_io_failed_hook(struct page *page, int failed_mirror)
675 {
676         struct extent_buffer *eb;
677         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
678 
679         eb = (struct extent_buffer *)page->private;
680         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
681         eb->read_mirror = failed_mirror;
682         atomic_dec(&eb->io_pages);
683         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
684                 btree_readahead_hook(root, eb, eb->start, -EIO);
685         return -EIO;    /* we fixed nothing */
686 }
687 
688 static void end_workqueue_bio(struct bio *bio, int err)
689 {
690         struct end_io_wq *end_io_wq = bio->bi_private;
691         struct btrfs_fs_info *fs_info;
692 
693         fs_info = end_io_wq->info;
694         end_io_wq->error = err;
695         end_io_wq->work.func = end_workqueue_fn;
696         end_io_wq->work.flags = 0;
697 
698         if (bio->bi_rw & REQ_WRITE) {
699                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
700                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
701                                            &end_io_wq->work);
702                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
703                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
704                                            &end_io_wq->work);
705                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
706                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
707                                            &end_io_wq->work);
708                 else
709                         btrfs_queue_worker(&fs_info->endio_write_workers,
710                                            &end_io_wq->work);
711         } else {
712                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
713                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
714                                            &end_io_wq->work);
715                 else if (end_io_wq->metadata)
716                         btrfs_queue_worker(&fs_info->endio_meta_workers,
717                                            &end_io_wq->work);
718                 else
719                         btrfs_queue_worker(&fs_info->endio_workers,
720                                            &end_io_wq->work);
721         }
722 }
723 
724 /*
725  * For the metadata arg you want
726  *
727  * 0 - if data
728  * 1 - if normal metadata
729  * 2 - if writing to the free space cache area
730  * 3 - raid parity work
731  */
732 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
733                         int metadata)
734 {
735         struct end_io_wq *end_io_wq;
736         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
737         if (!end_io_wq)
738                 return -ENOMEM;
739 
740         end_io_wq->private = bio->bi_private;
741         end_io_wq->end_io = bio->bi_end_io;
742         end_io_wq->info = info;
743         end_io_wq->error = 0;
744         end_io_wq->bio = bio;
745         end_io_wq->metadata = metadata;
746 
747         bio->bi_private = end_io_wq;
748         bio->bi_end_io = end_workqueue_bio;
749         return 0;
750 }
751 
752 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
753 {
754         unsigned long limit = min_t(unsigned long,
755                                     info->workers.max_workers,
756                                     info->fs_devices->open_devices);
757         return 256 * limit;
758 }
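
/*
 * Example (hypothetical numbers): with workers.max_workers == 8 and 4 open
 * devices, the async submit limit is 256 * min(8, 4) = 1024 in-flight bios.
 */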
759 
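/*
 * The three handlers below form the async submission pipeline wired up in
 * btrfs_wq_submit_bio(): work.func (run_one_async_start) checksums the bio,
 * work.ordered_func (run_one_async_done) submits it (ordered callbacks run
 * in queueing order, preserving bio order), and work.ordered_free
 * (run_one_async_free) releases the async_submit_bio.
 */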
760 static void run_one_async_start(struct btrfs_work *work)
761 {
762         struct async_submit_bio *async;
763         int ret;
764 
765         async = container_of(work, struct  async_submit_bio, work);
766         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
767                                       async->mirror_num, async->bio_flags,
768                                       async->bio_offset);
769         if (ret)
770                 async->error = ret;
771 }
772 
773 static void run_one_async_done(struct btrfs_work *work)
774 {
775         struct btrfs_fs_info *fs_info;
776         struct async_submit_bio *async;
777         int limit;
778 
779         async = container_of(work, struct  async_submit_bio, work);
780         fs_info = BTRFS_I(async->inode)->root->fs_info;
781 
782         limit = btrfs_async_submit_limit(fs_info);
783         limit = limit * 2 / 3;
784 
785         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
786             waitqueue_active(&fs_info->async_submit_wait))
787                 wake_up(&fs_info->async_submit_wait);
788 
789         /* If an error occurred we just want to clean up the bio and move on */
790         if (async->error) {
791                 bio_endio(async->bio, async->error);
792                 return;
793         }
794 
795         async->submit_bio_done(async->inode, async->rw, async->bio,
796                                async->mirror_num, async->bio_flags,
797                                async->bio_offset);
798 }
799 
800 static void run_one_async_free(struct btrfs_work *work)
801 {
802         struct async_submit_bio *async;
803 
804         async = container_of(work, struct  async_submit_bio, work);
805         kfree(async);
806 }
807 
808 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
809                         int rw, struct bio *bio, int mirror_num,
810                         unsigned long bio_flags,
811                         u64 bio_offset,
812                         extent_submit_bio_hook_t *submit_bio_start,
813                         extent_submit_bio_hook_t *submit_bio_done)
814 {
815         struct async_submit_bio *async;
816 
817         async = kmalloc(sizeof(*async), GFP_NOFS);
818         if (!async)
819                 return -ENOMEM;
820 
821         async->inode = inode;
822         async->rw = rw;
823         async->bio = bio;
824         async->mirror_num = mirror_num;
825         async->submit_bio_start = submit_bio_start;
826         async->submit_bio_done = submit_bio_done;
827 
828         async->work.func = run_one_async_start;
829         async->work.ordered_func = run_one_async_done;
830         async->work.ordered_free = run_one_async_free;
831 
832         async->work.flags = 0;
833         async->bio_flags = bio_flags;
834         async->bio_offset = bio_offset;
835 
836         async->error = 0;
837 
838         atomic_inc(&fs_info->nr_async_submits);
839 
840         if (rw & REQ_SYNC)
841                 btrfs_set_work_high_prio(&async->work);
842 
843         btrfs_queue_worker(&fs_info->workers, &async->work);
844 
845         while (atomic_read(&fs_info->async_submit_draining) &&
846               atomic_read(&fs_info->nr_async_submits)) {
847                 wait_event(fs_info->async_submit_wait,
848                            (atomic_read(&fs_info->nr_async_submits) == 0));
849         }
850 
851         return 0;
852 }
853 
854 static int btree_csum_one_bio(struct bio *bio)
855 {
856         struct bio_vec *bvec = bio->bi_io_vec;
857         int bio_index = 0;
858         struct btrfs_root *root;
859         int ret = 0;
860 
861         WARN_ON(bio->bi_vcnt <= 0);
862         while (bio_index < bio->bi_vcnt) {
863                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
864                 ret = csum_dirty_buffer(root, bvec->bv_page);
865                 if (ret)
866                         break;
867                 bio_index++;
868                 bvec++;
869         }
870         return ret;
871 }
872 
873 static int __btree_submit_bio_start(struct inode *inode, int rw,
874                                     struct bio *bio, int mirror_num,
875                                     unsigned long bio_flags,
876                                     u64 bio_offset)
877 {
878         /*
879          * when we're called for a write, we're already in the async
880          * submission context.  Just checksum the bio here.
881          */
882         return btree_csum_one_bio(bio);
883 }
884 
885 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
886                                  int mirror_num, unsigned long bio_flags,
887                                  u64 bio_offset)
888 {
889         int ret;
890 
891         /*
892          * when we're called for a write, we're already in the async
893          * submission context.  Just jump into btrfs_map_bio
894          */
895         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
896         if (ret)
897                 bio_endio(bio, ret);
898         return ret;
899 }
900 
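/*
 * Decide whether a metadata write needs the async checksum offload: tree-log
 * bios are always checksummed inline, and on x86 the offload is skipped when
 * the CPU has SSE4.2 (hardware crc32c), where checksumming is cheap enough to
 * do in the caller's context.
 */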
901 static int check_async_write(struct inode *inode, unsigned long bio_flags)
902 {
903         if (bio_flags & EXTENT_BIO_TREE_LOG)
904                 return 0;
905 #ifdef CONFIG_X86
906         if (cpu_has_xmm4_2)
907                 return 0;
908 #endif
909         return 1;
910 }
911 
912 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
913                                  int mirror_num, unsigned long bio_flags,
914                                  u64 bio_offset)
915 {
916         int async = check_async_write(inode, bio_flags);
917         int ret;
918 
919         if (!(rw & REQ_WRITE)) {
920                 /*
921                  * called for a read, do the setup so that checksum validation
922                  * can happen in the async kernel threads
923                  */
924                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
925                                           bio, 1);
926                 if (ret)
927                         goto out_w_error;
928                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
929                                     mirror_num, 0);
930         } else if (!async) {
931                 ret = btree_csum_one_bio(bio);
932                 if (ret)
933                         goto out_w_error;
934                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
935                                     mirror_num, 0);
936         } else {
937                 /*
938                  * kthread helpers are used to submit writes so that
939                  * checksumming can happen in parallel across all CPUs
940                  */
941                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
942                                           inode, rw, bio, mirror_num, 0,
943                                           bio_offset,
944                                           __btree_submit_bio_start,
945                                           __btree_submit_bio_done);
946         }
947 
948         if (ret) {
949 out_w_error:
950                 bio_endio(bio, ret);
951         }
952         return ret;
953 }
954 
955 #ifdef CONFIG_MIGRATION
956 static int btree_migratepage(struct address_space *mapping,
957                         struct page *newpage, struct page *page,
958                         enum migrate_mode mode)
959 {
960         /*
961          * we can't safely write a btree page from here because
962          * we haven't done the locking hook
963          */
964         if (PageDirty(page))
965                 return -EAGAIN;
966         /*
967          * Buffers may be managed in a filesystem specific way.
968          * We must have no buffers or drop them.
969          */
970         if (page_has_private(page) &&
971             !try_to_release_page(page, GFP_KERNEL))
972                 return -EAGAIN;
973         return migrate_page(mapping, newpage, page, mode);
974 }
975 #endif
976 
977 
978 static int btree_writepages(struct address_space *mapping,
979                             struct writeback_control *wbc)
980 {
981         struct extent_io_tree *tree;
982         struct btrfs_fs_info *fs_info;
983         int ret;
984 
985         tree = &BTRFS_I(mapping->host)->io_tree;
986         if (wbc->sync_mode == WB_SYNC_NONE) {
987 
988                 if (wbc->for_kupdate)
989                         return 0;
990 
991                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
992                 /* this is a bit racy, but that's ok */
993                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
994                                              BTRFS_DIRTY_METADATA_THRESH);
995                 if (ret < 0)
996                         return 0;
997         }
998         return btree_write_cache_pages(mapping, wbc);
999 }
1000 
1001 static int btree_readpage(struct file *file, struct page *page)
1002 {
1003         struct extent_io_tree *tree;
1004         tree = &BTRFS_I(page->mapping->host)->io_tree;
1005         return extent_read_full_page(tree, page, btree_get_extent, 0);
1006 }
1007 
1008 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1009 {
1010         if (PageWriteback(page) || PageDirty(page))
1011                 return 0;
1012 
1013         return try_release_extent_buffer(page);
1014 }
1015 
1016 static void btree_invalidatepage(struct page *page, unsigned long offset)
1017 {
1018         struct extent_io_tree *tree;
1019         tree = &BTRFS_I(page->mapping->host)->io_tree;
1020         extent_invalidatepage(tree, page, offset);
1021         btree_releasepage(page, GFP_NOFS);
1022         if (PagePrivate(page)) {
1023                 printk(KERN_WARNING "btrfs warning page private not zero "
1024                        "on page %llu\n", (unsigned long long)page_offset(page));
1025                 ClearPagePrivate(page);
1026                 set_page_private(page, 0);
1027                 page_cache_release(page);
1028         }
1029 }
1030 
1031 static int btree_set_page_dirty(struct page *page)
1032 {
1033 #ifdef DEBUG
1034         struct extent_buffer *eb;
1035 
1036         BUG_ON(!PagePrivate(page));
1037         eb = (struct extent_buffer *)page->private;
1038         BUG_ON(!eb);
1039         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1040         BUG_ON(!atomic_read(&eb->refs));
1041         btrfs_assert_tree_locked(eb);
1042 #endif
1043         return __set_page_dirty_nobuffers(page);
1044 }
1045 
1046 static const struct address_space_operations btree_aops = {
1047         .readpage       = btree_readpage,
1048         .writepages     = btree_writepages,
1049         .releasepage    = btree_releasepage,
1050         .invalidatepage = btree_invalidatepage,
1051 #ifdef CONFIG_MIGRATION
1052         .migratepage    = btree_migratepage,
1053 #endif
1054         .set_page_dirty = btree_set_page_dirty,
1055 };
1056 
1057 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1058                          u64 parent_transid)
1059 {
1060         struct extent_buffer *buf = NULL;
1061         struct inode *btree_inode = root->fs_info->btree_inode;
1062         int ret = 0;
1063 
1064         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1065         if (!buf)
1066                 return 0;
1067         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1068                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1069         free_extent_buffer(buf);
1070         return ret;
1071 }
1072 
1073 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1074                          int mirror_num, struct extent_buffer **eb)
1075 {
1076         struct extent_buffer *buf = NULL;
1077         struct inode *btree_inode = root->fs_info->btree_inode;
1078         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1079         int ret;
1080 
1081         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1082         if (!buf)
1083                 return 0;
1084 
1085         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1086 
1087         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1088                                        btree_get_extent, mirror_num);
1089         if (ret) {
1090                 free_extent_buffer(buf);
1091                 return ret;
1092         }
1093 
1094         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1095                 free_extent_buffer(buf);
1096                 return -EIO;
1097         } else if (extent_buffer_uptodate(buf)) {
1098                 *eb = buf;
1099         } else {
1100                 free_extent_buffer(buf);
1101         }
1102         return 0;
1103 }
1104 
1105 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1106                                             u64 bytenr, u32 blocksize)
1107 {
1108         struct inode *btree_inode = root->fs_info->btree_inode;
1109         struct extent_buffer *eb;
1110         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1111                                 bytenr, blocksize);
1112         return eb;
1113 }
1114 
1115 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1116                                                  u64 bytenr, u32 blocksize)
1117 {
1118         struct inode *btree_inode = root->fs_info->btree_inode;
1119         struct extent_buffer *eb;
1120 
1121         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1122                                  bytenr, blocksize);
1123         return eb;
1124 }
1125 
1126 
1127 int btrfs_write_tree_block(struct extent_buffer *buf)
1128 {
1129         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1130                                         buf->start + buf->len - 1);
1131 }
1132 
1133 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1134 {
1135         return filemap_fdatawait_range(buf->pages[0]->mapping,
1136                                        buf->start, buf->start + buf->len - 1);
1137 }
1138 
1139 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1140                                       u32 blocksize, u64 parent_transid)
1141 {
1142         struct extent_buffer *buf = NULL;
1143         int ret;
1144 
1145         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1146         if (!buf)
1147                 return NULL;
1148 
1149         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1150         return buf;
1151 
1152 }
1153 
1154 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1155                       struct extent_buffer *buf)
1156 {
1157         struct btrfs_fs_info *fs_info = root->fs_info;
1158 
1159         if (btrfs_header_generation(buf) ==
1160             fs_info->running_transaction->transid) {
1161                 btrfs_assert_tree_locked(buf);
1162 
1163                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1164                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1165                                              -buf->len,
1166                                              fs_info->dirty_metadata_batch);
1167                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1168                         btrfs_set_lock_blocking(buf);
1169                         clear_extent_buffer_dirty(buf);
1170                 }
1171         }
1172 }
1173 
1174 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1175                          u32 stripesize, struct btrfs_root *root,
1176                          struct btrfs_fs_info *fs_info,
1177                          u64 objectid)
1178 {
1179         root->node = NULL;
1180         root->commit_root = NULL;
1181         root->sectorsize = sectorsize;
1182         root->nodesize = nodesize;
1183         root->leafsize = leafsize;
1184         root->stripesize = stripesize;
1185         root->ref_cows = 0;
1186         root->track_dirty = 0;
1187         root->in_radix = 0;
1188         root->orphan_item_inserted = 0;
1189         root->orphan_cleanup_state = 0;
1190 
1191         root->objectid = objectid;
1192         root->last_trans = 0;
1193         root->highest_objectid = 0;
1194         root->name = NULL;
1195         root->inode_tree = RB_ROOT;
1196         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1197         root->block_rsv = NULL;
1198         root->orphan_block_rsv = NULL;
1199 
1200         INIT_LIST_HEAD(&root->dirty_list);
1201         INIT_LIST_HEAD(&root->root_list);
1202         INIT_LIST_HEAD(&root->logged_list[0]);
1203         INIT_LIST_HEAD(&root->logged_list[1]);
1204         spin_lock_init(&root->orphan_lock);
1205         spin_lock_init(&root->inode_lock);
1206         spin_lock_init(&root->accounting_lock);
1207         spin_lock_init(&root->log_extents_lock[0]);
1208         spin_lock_init(&root->log_extents_lock[1]);
1209         mutex_init(&root->objectid_mutex);
1210         mutex_init(&root->log_mutex);
1211         init_waitqueue_head(&root->log_writer_wait);
1212         init_waitqueue_head(&root->log_commit_wait[0]);
1213         init_waitqueue_head(&root->log_commit_wait[1]);
1214         atomic_set(&root->log_commit[0], 0);
1215         atomic_set(&root->log_commit[1], 0);
1216         atomic_set(&root->log_writers, 0);
1217         atomic_set(&root->log_batch, 0);
1218         atomic_set(&root->orphan_inodes, 0);
1219         root->log_transid = 0;
1220         root->last_log_commit = 0;
1221         extent_io_tree_init(&root->dirty_log_pages,
1222                              fs_info->btree_inode->i_mapping);
1223 
1224         memset(&root->root_key, 0, sizeof(root->root_key));
1225         memset(&root->root_item, 0, sizeof(root->root_item));
1226         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1227         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1228         root->defrag_trans_start = fs_info->generation;
1229         init_completion(&root->kobj_unregister);
1230         root->defrag_running = 0;
1231         root->root_key.objectid = objectid;
1232         root->anon_dev = 0;
1233 
1234         spin_lock_init(&root->root_item_lock);
1235 }
1236 
1237 static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1238                                             struct btrfs_fs_info *fs_info,
1239                                             u64 objectid,
1240                                             struct btrfs_root *root)
1241 {
1242         int ret;
1243         u32 blocksize;
1244         u64 generation;
1245 
1246         __setup_root(tree_root->nodesize, tree_root->leafsize,
1247                      tree_root->sectorsize, tree_root->stripesize,
1248                      root, fs_info, objectid);
1249         ret = btrfs_find_last_root(tree_root, objectid,
1250                                    &root->root_item, &root->root_key);
1251         if (ret > 0)
1252                 return -ENOENT;
1253         else if (ret < 0)
1254                 return ret;
1255 
1256         generation = btrfs_root_generation(&root->root_item);
1257         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1258         root->commit_root = NULL;
1259         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1260                                      blocksize, generation);
1261         if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1262                 free_extent_buffer(root->node);
1263                 root->node = NULL;
1264                 return -EIO;
1265         }
1266         root->commit_root = btrfs_root_node(root);
1267         return 0;
1268 }
1269 
1270 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1271 {
1272         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1273         if (root)
1274                 root->fs_info = fs_info;
1275         return root;
1276 }
1277 
1278 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1279                                      struct btrfs_fs_info *fs_info,
1280                                      u64 objectid)
1281 {
1282         struct extent_buffer *leaf;
1283         struct btrfs_root *tree_root = fs_info->tree_root;
1284         struct btrfs_root *root;
1285         struct btrfs_key key;
1286         int ret = 0;
1287         u64 bytenr;
1288         uuid_le uuid;
1289 
1290         root = btrfs_alloc_root(fs_info);
1291         if (!root)
1292                 return ERR_PTR(-ENOMEM);
1293 
1294         __setup_root(tree_root->nodesize, tree_root->leafsize,
1295                      tree_root->sectorsize, tree_root->stripesize,
1296                      root, fs_info, objectid);
1297         root->root_key.objectid = objectid;
1298         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1299         root->root_key.offset = 0;
1300 
1301         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1302                                       0, objectid, NULL, 0, 0, 0);
1303         if (IS_ERR(leaf)) {
1304                 ret = PTR_ERR(leaf);
1305                 leaf = NULL;
1306                 goto fail;
1307         }
1308 
1309         bytenr = leaf->start;
1310         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1311         btrfs_set_header_bytenr(leaf, leaf->start);
1312         btrfs_set_header_generation(leaf, trans->transid);
1313         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1314         btrfs_set_header_owner(leaf, objectid);
1315         root->node = leaf;
1316 
1317         write_extent_buffer(leaf, fs_info->fsid,
1318                             (unsigned long)btrfs_header_fsid(leaf),
1319                             BTRFS_FSID_SIZE);
1320         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1321                             (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1322                             BTRFS_UUID_SIZE);
1323         btrfs_mark_buffer_dirty(leaf);
1324 
1325         root->commit_root = btrfs_root_node(root);
1326         root->track_dirty = 1;
1327 
1328 
1329         root->root_item.flags = 0;
1330         root->root_item.byte_limit = 0;
1331         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1332         btrfs_set_root_generation(&root->root_item, trans->transid);
1333         btrfs_set_root_level(&root->root_item, 0);
1334         btrfs_set_root_refs(&root->root_item, 1);
1335         btrfs_set_root_used(&root->root_item, leaf->len);
1336         btrfs_set_root_last_snapshot(&root->root_item, 0);
1337         btrfs_set_root_dirid(&root->root_item, 0);
1338         uuid_le_gen(&uuid);
1339         memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1340         root->root_item.drop_level = 0;
1341 
1342         key.objectid = objectid;
1343         key.type = BTRFS_ROOT_ITEM_KEY;
1344         key.offset = 0;
1345         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1346         if (ret)
1347                 goto fail;
1348 
1349         btrfs_tree_unlock(leaf);
1350 
1351         return root;
1352 
1353 fail:
1354         if (leaf) {
1355                 btrfs_tree_unlock(leaf);
1356                 free_extent_buffer(leaf);
1357         }
1358         kfree(root);
1359 
1360         return ERR_PTR(ret);
1361 }
1362 
1363 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1364                                          struct btrfs_fs_info *fs_info)
1365 {
1366         struct btrfs_root *root;
1367         struct btrfs_root *tree_root = fs_info->tree_root;
1368         struct extent_buffer *leaf;
1369 
1370         root = btrfs_alloc_root(fs_info);
1371         if (!root)
1372                 return ERR_PTR(-ENOMEM);
1373 
1374         __setup_root(tree_root->nodesize, tree_root->leafsize,
1375                      tree_root->sectorsize, tree_root->stripesize,
1376                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1377 
1378         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1379         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1380         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1381         /*
1382          * log trees do not get reference counted because they go away
1383          * before a real commit is actually done.  They do store pointers
1384          * to file data extents, and those reference counts still get
1385          * updated (along with back refs to the log tree).
1386          */
1387         root->ref_cows = 0;
1388 
1389         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1390                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1391                                       0, 0, 0);
1392         if (IS_ERR(leaf)) {
1393                 kfree(root);
1394                 return ERR_CAST(leaf);
1395         }
1396 
1397         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1398         btrfs_set_header_bytenr(leaf, leaf->start);
1399         btrfs_set_header_generation(leaf, trans->transid);
1400         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1401         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1402         root->node = leaf;
1403 
1404         write_extent_buffer(root->node, root->fs_info->fsid,
1405                             (unsigned long)btrfs_header_fsid(root->node),
1406                             BTRFS_FSID_SIZE);
1407         btrfs_mark_buffer_dirty(root->node);
1408         btrfs_tree_unlock(root->node);
1409         return root;
1410 }
1411 
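/*
 * create the filesystem-wide log root tree and store it at
 * fs_info->log_root_tree; the per-subvolume log trees created by
 * btrfs_add_log_tree() below are committed through this tree.
 */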
1412 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1413                              struct btrfs_fs_info *fs_info)
1414 {
1415         struct btrfs_root *log_root;
1416 
1417         log_root = alloc_log_tree(trans, fs_info);
1418         if (IS_ERR(log_root))
1419                 return PTR_ERR(log_root);
1420         WARN_ON(fs_info->log_root_tree);
1421         fs_info->log_root_tree = log_root;
1422         return 0;
1423 }
1424 
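/*
 * create a log tree for a single subvolume root: the new root is keyed
 * by the subvolume's objectid in root_key.offset, its root item is
 * seeded with a minimal directory inode item, and it is attached at
 * root->log_root with the per-root log counters reset.
 */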
1425 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1426                        struct btrfs_root *root)
1427 {
1428         struct btrfs_root *log_root;
1429         struct btrfs_inode_item *inode_item;
1430 
1431         log_root = alloc_log_tree(trans, root->fs_info);
1432         if (IS_ERR(log_root))
1433                 return PTR_ERR(log_root);
1434 
1435         log_root->last_trans = trans->transid;
1436         log_root->root_key.offset = root->root_key.objectid;
1437 
1438         inode_item = &log_root->root_item.inode;
1439         inode_item->generation = cpu_to_le64(1);
1440         inode_item->size = cpu_to_le64(3);
1441         inode_item->nlink = cpu_to_le32(1);
1442         inode_item->nbytes = cpu_to_le64(root->leafsize);
1443         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1444 
1445         btrfs_set_root_node(&log_root->root_item, log_root->node);
1446 
1447         WARN_ON(root->log_root);
1448         root->log_root = log_root;
1449         root->log_transid = 0;
1450         root->last_log_commit = 0;
1451         return 0;
1452 }
1453 
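/*
 * read a root from the root tree (or via find_and_setup_root() when
 * location->offset is -1) without looking in or adding to the
 * fs_roots_radix cache; btrfs_read_fs_root_no_name() below takes care
 * of the caching.
 */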
1454 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1455                                                struct btrfs_key *location)
1456 {
1457         struct btrfs_root *root;
1458         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1459         struct btrfs_path *path;
1460         struct extent_buffer *l;
1461         u64 generation;
1462         u32 blocksize;
1463         int ret = 0;
1464         int slot;
1465 
1466         root = btrfs_alloc_root(fs_info);
1467         if (!root)
1468                 return ERR_PTR(-ENOMEM);
1469         if (location->offset == (u64)-1) {
1470                 ret = find_and_setup_root(tree_root, fs_info,
1471                                           location->objectid, root);
1472                 if (ret) {
1473                         kfree(root);
1474                         return ERR_PTR(ret);
1475                 }
1476                 goto out;
1477         }
1478 
1479         __setup_root(tree_root->nodesize, tree_root->leafsize,
1480                      tree_root->sectorsize, tree_root->stripesize,
1481                      root, fs_info, location->objectid);
1482 
1483         path = btrfs_alloc_path();
1484         if (!path) {
1485                 kfree(root);
1486                 return ERR_PTR(-ENOMEM);
1487         }
1488         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1489         if (ret == 0) {
1490                 l = path->nodes[0];
1491                 slot = path->slots[0];
1492                 btrfs_read_root_item(l, slot, &root->root_item);
1493                 memcpy(&root->root_key, location, sizeof(*location));
1494         }
1495         btrfs_free_path(path);
1496         if (ret) {
1497                 kfree(root);
1498                 if (ret > 0)
1499                         ret = -ENOENT;
1500                 return ERR_PTR(ret);
1501         }
1502 
1503         generation = btrfs_root_generation(&root->root_item);
1504         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1505         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1506                                      blocksize, generation);
1507         if (!root->node || !extent_buffer_uptodate(root->node)) {
1508                 ret = (!root->node) ? -ENOMEM : -EIO;
1509 
1510                 free_extent_buffer(root->node);
1511                 kfree(root);
1512                 return ERR_PTR(ret);
1513         }
1514 
1515         root->commit_root = btrfs_root_node(root);
1516 out:
1517         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1518                 root->ref_cows = 1;
1519                 btrfs_check_and_init_root_item(&root->root_item);
1520         }
1521 
1522         return root;
1523 }
1524 
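/*
 * look up a root by key: the well known roots are returned straight
 * from fs_info, everything else is first looked up in the
 * fs_roots_radix cache and only read from disk on a miss.  A freshly
 * read root gets its free-ino state and anonymous bdev set up and is
 * then inserted into the radix tree; -EEXIST from a racing insert
 * frees our copy and retries the lookup.
 */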
1525 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1526                                               struct btrfs_key *location)
1527 {
1528         struct btrfs_root *root;
1529         int ret;
1530 
1531         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1532                 return fs_info->tree_root;
1533         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1534                 return fs_info->extent_root;
1535         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1536                 return fs_info->chunk_root;
1537         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1538                 return fs_info->dev_root;
1539         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1540                 return fs_info->csum_root;
1541         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1542                 return fs_info->quota_root ? fs_info->quota_root :
1543                                              ERR_PTR(-ENOENT);
1544 again:
1545         spin_lock(&fs_info->fs_roots_radix_lock);
1546         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1547                                  (unsigned long)location->objectid);
1548         spin_unlock(&fs_info->fs_roots_radix_lock);
1549         if (root)
1550                 return root;
1551 
1552         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1553         if (IS_ERR(root))
1554                 return root;
1555 
1556         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1557         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1558                                         GFP_NOFS);
1559         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1560                 ret = -ENOMEM;
1561                 goto fail;
1562         }
1563 
1564         btrfs_init_free_ino_ctl(root);
1565         mutex_init(&root->fs_commit_mutex);
1566         spin_lock_init(&root->cache_lock);
1567         init_waitqueue_head(&root->cache_wait);
1568 
1569         ret = get_anon_bdev(&root->anon_dev);
1570         if (ret)
1571                 goto fail;
1572 
1573         if (btrfs_root_refs(&root->root_item) == 0) {
1574                 ret = -ENOENT;
1575                 goto fail;
1576         }
1577 
1578         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1579         if (ret < 0)
1580                 goto fail;
1581         if (ret == 0)
1582                 root->orphan_item_inserted = 1;
1583 
1584         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1585         if (ret)
1586                 goto fail;
1587 
1588         spin_lock(&fs_info->fs_roots_radix_lock);
1589         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1590                                 (unsigned long)root->root_key.objectid,
1591                                 root);
1592         if (ret == 0)
1593                 root->in_radix = 1;
1594 
1595         spin_unlock(&fs_info->fs_roots_radix_lock);
1596         radix_tree_preload_end();
1597         if (ret) {
1598                 if (ret == -EEXIST) {
1599                         free_fs_root(root);
1600                         goto again;
1601                 }
1602                 goto fail;
1603         }
1604 
1605         ret = btrfs_find_dead_roots(fs_info->tree_root,
1606                                     root->root_key.objectid);
1607         WARN_ON(ret);
1608         return root;
1609 fail:
1610         free_fs_root(root);
1611         return ERR_PTR(ret);
1612 }
1613 
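/*
 * congestion callback for the btrfs backing_dev_info: the filesystem
 * is reported congested if any of the underlying devices' bdis are
 * congested for the requested bits.
 */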
1614 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1615 {
1616         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1617         int ret = 0;
1618         struct btrfs_device *device;
1619         struct backing_dev_info *bdi;
1620 
1621         rcu_read_lock();
1622         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1623                 if (!device->bdev)
1624                         continue;
1625                 bdi = blk_get_backing_dev_info(device->bdev);
1626                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1627                         ret = 1;
1628                         break;
1629                 }
1630         }
1631         rcu_read_unlock();
1632         return ret;
1633 }
1634 
1635 /*
1636  * If this fails, caller must call bdi_destroy() to get rid of the
1637  * bdi again.
1638  */
1639 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1640 {
1641         int err;
1642 
1643         bdi->capabilities = BDI_CAP_MAP_COPY;
1644         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1645         if (err)
1646                 return err;
1647 
1648         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1649         bdi->congested_fn       = btrfs_congested_fn;
1650         bdi->congested_data     = info;
1651         return 0;
1652 }
1653 
1654 /*
1655  * called by the kthread helper functions to finally call the bio end_io
1656  * functions.  This is where read checksum verification actually happens
1657  */
1658 static void end_workqueue_fn(struct btrfs_work *work)
1659 {
1660         struct bio *bio;
1661         struct end_io_wq *end_io_wq;
1662         struct btrfs_fs_info *fs_info;
1663         int error;
1664 
1665         end_io_wq = container_of(work, struct end_io_wq, work);
1666         bio = end_io_wq->bio;
1667         fs_info = end_io_wq->info;
1668 
1669         error = end_io_wq->error;
1670         bio->bi_private = end_io_wq->private;
1671         bio->bi_end_io = end_io_wq->end_io;
1672         kfree(end_io_wq);
1673         bio_endio(bio, error);
1674 }
1675 
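/*
 * the cleaner kthread does background cleanup: when the filesystem is
 * writable and the umount/cleaner locks can be taken without blocking,
 * it runs delayed iputs, cleans one deleted snapshot per pass and kicks
 * off inode defrag; it only goes back to sleep when there is no more
 * snapshot cleanup left to do.
 */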
1676 static int cleaner_kthread(void *arg)
1677 {
1678         struct btrfs_root *root = arg;
1679 
1680         do {
1681                 int again = 0;
1682 
1683                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1684                     down_read_trylock(&root->fs_info->sb->s_umount)) {
1685                         if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
1686                                 btrfs_run_delayed_iputs(root);
1687                                 again = btrfs_clean_one_deleted_snapshot(root);
1688                                 mutex_unlock(&root->fs_info->cleaner_mutex);
1689                         }
1690                         btrfs_run_defrag_inodes(root->fs_info);
1691                         up_read(&root->fs_info->sb->s_umount);
1692                 }
1693 
1694                 if (!try_to_freeze() && !again) {
1695                         set_current_state(TASK_INTERRUPTIBLE);
1696                         if (!kthread_should_stop())
1697                                 schedule();
1698                         __set_current_state(TASK_RUNNING);
1699                 }
1700         } while (!kthread_should_stop());
1701         return 0;
1702 }
1703 
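/*
 * the transaction kthread periodically commits the running transaction:
 * it wakes up every 30 seconds (re-checking after 5 seconds while the
 * running transaction is still young and not blocked), attaches to the
 * transaction it sampled and commits it if it is still the current one,
 * otherwise it just drops the handle.  The cleaner kthread is woken
 * after every pass.
 */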
1704 static int transaction_kthread(void *arg)
1705 {
1706         struct btrfs_root *root = arg;
1707         struct btrfs_trans_handle *trans;
1708         struct btrfs_transaction *cur;
1709         u64 transid;
1710         unsigned long now;
1711         unsigned long delay;
1712         bool cannot_commit;
1713 
1714         do {
1715                 cannot_commit = false;
1716                 delay = HZ * 30;
1717                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1718 
1719                 spin_lock(&root->fs_info->trans_lock);
1720                 cur = root->fs_info->running_transaction;
1721                 if (!cur) {
1722                         spin_unlock(&root->fs_info->trans_lock);
1723                         goto sleep;
1724                 }
1725 
1726                 now = get_seconds();
1727                 if (!cur->blocked &&
1728                     (now < cur->start_time || now - cur->start_time < 30)) {
1729                         spin_unlock(&root->fs_info->trans_lock);
1730                         delay = HZ * 5;
1731                         goto sleep;
1732                 }
1733                 transid = cur->transid;
1734                 spin_unlock(&root->fs_info->trans_lock);
1735 
1736                 /* If the file system is aborted, this will always fail. */
1737                 trans = btrfs_attach_transaction(root);
1738                 if (IS_ERR(trans)) {
1739                         if (PTR_ERR(trans) != -ENOENT)
1740                                 cannot_commit = true;
1741                         goto sleep;
1742                 }
1743                 if (transid == trans->transid) {
1744                         btrfs_commit_transaction(trans, root);
1745                 } else {
1746                         btrfs_end_transaction(trans, root);
1747                 }
1748 sleep:
1749                 wake_up_process(root->fs_info->cleaner_kthread);
1750                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1751 
1752                 if (!try_to_freeze()) {
1753                         set_current_state(TASK_INTERRUPTIBLE);
1754                         if (!kthread_should_stop() &&
1755                             (!btrfs_transaction_blocked(root->fs_info) ||
1756                              cannot_commit))
1757                                 schedule_timeout(delay);
1758                         __set_current_state(TASK_RUNNING);
1759                 }
1760         } while (!kthread_should_stop());
1761         return 0;
1762 }
1763 
1764 /*
1765  * this will find the highest generation in the array of
1766  * root backups.  The index of the newest entry is returned,
1767  * or -1 if we can't find anything.
1768  *
1769  * We check to make sure the array is valid by comparing the
1770  * generation of the latest root in the array with the generation
1771  * in the super block.  If they don't match we pitch it.
1772  */
1773 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1774 {
1775         u64 cur;
1776         int newest_index = -1;
1777         struct btrfs_root_backup *root_backup;
1778         int i;
1779 
1780         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1781                 root_backup = info->super_copy->super_roots + i;
1782                 cur = btrfs_backup_tree_root_gen(root_backup);
1783                 if (cur == newest_gen)
1784                         newest_index = i;
1785         }
1786 
1787         /* check to see if we actually wrapped around */
1788         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1789                 root_backup = info->super_copy->super_roots;
1790                 cur = btrfs_backup_tree_root_gen(root_backup);
1791                 if (cur == newest_gen)
1792                         newest_index = 0;
1793         }
1794         return newest_index;
1795 }
1796 
1797 
1798 /*
1799  * find the oldest backup so we know where to store new entries
1800  * in the backup array.  This will set the backup_root_index
1801  * field in the fs_info struct
1802  */
1803 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1804                                      u64 newest_gen)
1805 {
1806         int newest_index = -1;
1807 
1808         newest_index = find_newest_super_backup(info, newest_gen);
1809         /* if there was garbage in there, just move along */
1810         if (newest_index == -1) {
1811                 info->backup_root_index = 0;
1812         } else {
1813                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1814         }
1815 }
1816 
1817 /*
1818  * copy all the root pointers into the super backup array.
1819  * this will bump the backup pointer by one when it is
1820  * done
1821  */
1822 static void backup_super_roots(struct btrfs_fs_info *info)
1823 {
1824         int next_backup;
1825         struct btrfs_root_backup *root_backup;
1826         int last_backup;
1827 
1828         next_backup = info->backup_root_index;
1829         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1830                 BTRFS_NUM_BACKUP_ROOTS;
1831 
1832         /*
1833          * just overwrite the last backup if we're at the same generation;
1834          * this happens only at umount
1835          */
1836         root_backup = info->super_for_commit->super_roots + last_backup;
1837         if (btrfs_backup_tree_root_gen(root_backup) ==
1838             btrfs_header_generation(info->tree_root->node))
1839                 next_backup = last_backup;
1840 
1841         root_backup = info->super_for_commit->super_roots + next_backup;
1842 
1843         /*
1844          * make sure all of our padding and empty slots get zero filled
1845          * regardless of which ones we use today
1846          */
1847         memset(root_backup, 0, sizeof(*root_backup));
1848 
1849         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1850 
1851         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1852         btrfs_set_backup_tree_root_gen(root_backup,
1853                                btrfs_header_generation(info->tree_root->node));
1854 
1855         btrfs_set_backup_tree_root_level(root_backup,
1856                                btrfs_header_level(info->tree_root->node));
1857 
1858         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1859         btrfs_set_backup_chunk_root_gen(root_backup,
1860                                btrfs_header_generation(info->chunk_root->node));
1861         btrfs_set_backup_chunk_root_level(root_backup,
1862                                btrfs_header_level(info->chunk_root->node));
1863 
1864         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1865         btrfs_set_backup_extent_root_gen(root_backup,
1866                                btrfs_header_generation(info->extent_root->node));
1867         btrfs_set_backup_extent_root_level(root_backup,
1868                                btrfs_header_level(info->extent_root->node));
1869 
1870         /*
1871          * we might commit during log recovery, which happens before we set
1872          * the fs_root.  Make sure it is valid before we fill it in.
1873          */
1874         if (info->fs_root && info->fs_root->node) {
1875                 btrfs_set_backup_fs_root(root_backup,
1876                                          info->fs_root->node->start);
1877                 btrfs_set_backup_fs_root_gen(root_backup,
1878                                btrfs_header_generation(info->fs_root->node));
1879                 btrfs_set_backup_fs_root_level(root_backup,
1880                                btrfs_header_level(info->fs_root->node));
1881         }
1882 
1883         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1884         btrfs_set_backup_dev_root_gen(root_backup,
1885                                btrfs_header_generation(info->dev_root->node));
1886         btrfs_set_backup_dev_root_level(root_backup,
1887                                        btrfs_header_level(info->dev_root->node));
1888 
1889         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1890         btrfs_set_backup_csum_root_gen(root_backup,
1891                                btrfs_header_generation(info->csum_root->node));
1892         btrfs_set_backup_csum_root_level(root_backup,
1893                                btrfs_header_level(info->csum_root->node));
1894 
1895         btrfs_set_backup_total_bytes(root_backup,
1896                              btrfs_super_total_bytes(info->super_copy));
1897         btrfs_set_backup_bytes_used(root_backup,
1898                              btrfs_super_bytes_used(info->super_copy));
1899         btrfs_set_backup_num_devices(root_backup,
1900                              btrfs_super_num_devices(info->super_copy));
1901 
1902         /*
1903          * if we don't copy this out to the super_copy, it won't get remembered
1904          * for the next commit
1905          */
1906         memcpy(&info->super_copy->super_roots,
1907                &info->super_for_commit->super_roots,
1908                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1909 }
1910 
1911 /*
1912  * this copies info out of the root backup array and back into
1913  * the in-memory super block.  It is meant to help iterate through
1914  * the array, so you send it the number of backups you've already
1915  * tried and the last backup index you used.
1916  *
1917  * this returns -1 when it has tried all the backups
1918  */
1919 static noinline int next_root_backup(struct btrfs_fs_info *info,
1920                                      struct btrfs_super_block *super,
1921                                      int *num_backups_tried, int *backup_index)
1922 {
1923         struct btrfs_root_backup *root_backup;
1924         int newest = *backup_index;
1925 
1926         if (*num_backups_tried == 0) {
1927                 u64 gen = btrfs_super_generation(super);
1928 
1929                 newest = find_newest_super_backup(info, gen);
1930                 if (newest == -1)
1931                         return -1;
1932 
1933                 *backup_index = newest;
1934                 *num_backups_tried = 1;
1935         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1936                 /* we've tried all the backups, all done */
1937                 return -1;
1938         } else {
1939                 /* jump to the next oldest backup */
1940                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1941                         BTRFS_NUM_BACKUP_ROOTS;
1942                 *backup_index = newest;
1943                 *num_backups_tried += 1;
1944         }
1945         root_backup = super->super_roots + newest;
1946 
1947         btrfs_set_super_generation(super,
1948                                    btrfs_backup_tree_root_gen(root_backup));
1949         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1950         btrfs_set_super_root_level(super,
1951                                    btrfs_backup_tree_root_level(root_backup));
1952         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1953 
1954         /*
1955          * fixme: the total bytes and num_devices need to match or we should
1956          * fixme: the total bytes and num_devices need to match or we
1957          * need a fsck
1958         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1959         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1960         return 0;
1961 }
1962 
1963 /* helper to cleanup workers */
1964 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1965 {
1966         btrfs_stop_workers(&fs_info->generic_worker);
1967         btrfs_stop_workers(&fs_info->fixup_workers);
1968         btrfs_stop_workers(&fs_info->delalloc_workers);
1969         btrfs_stop_workers(&fs_info->workers);
1970         btrfs_stop_workers(&fs_info->endio_workers);
1971         btrfs_stop_workers(&fs_info->endio_meta_workers);
1972         btrfs_stop_workers(&fs_info->endio_raid56_workers);
1973         btrfs_stop_workers(&fs_info->rmw_workers);
1974         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1975         btrfs_stop_workers(&fs_info->endio_write_workers);
1976         btrfs_stop_workers(&fs_info->endio_freespace_worker);
1977         btrfs_stop_workers(&fs_info->submit_workers);
1978         btrfs_stop_workers(&fs_info->delayed_workers);
1979         btrfs_stop_workers(&fs_info->caching_workers);
1980         btrfs_stop_workers(&fs_info->readahead_workers);
1981         btrfs_stop_workers(&fs_info->flush_workers);
1982         btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
1983 }
1984 
1985 /* helper to cleanup tree roots */
1986 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1987 {
1988         free_extent_buffer(info->tree_root->node);
1989         free_extent_buffer(info->tree_root->commit_root);
1990         info->tree_root->node = NULL;
1991         info->tree_root->commit_root = NULL;
1992 
1993         if (info->dev_root) {
1994                 free_extent_buffer(info->dev_root->node);
1995                 free_extent_buffer(info->dev_root->commit_root);
1996                 info->dev_root->node = NULL;
1997                 info->dev_root->commit_root = NULL;
1998         }
1999         if (info->extent_root) {
2000                 free_extent_buffer(info->extent_root->node);
2001                 free_extent_buffer(info->extent_root->commit_root);
2002                 info->extent_root->node = NULL;
2003                 info->extent_root->commit_root = NULL;
2004         }
2005         if (info->csum_root) {
2006                 free_extent_buffer(info->csum_root->node);
2007                 free_extent_buffer(info->csum_root->commit_root);
2008                 info->csum_root->node = NULL;
2009                 info->csum_root->commit_root = NULL;
2010         }
2011         if (info->quota_root) {
2012                 free_extent_buffer(info->quota_root->node);
2013                 free_extent_buffer(info->quota_root->commit_root);
2014                 info->quota_root->node = NULL;
2015                 info->quota_root->commit_root = NULL;
2016         }
2017         if (chunk_root) {
2018                 free_extent_buffer(info->chunk_root->node);
2019                 free_extent_buffer(info->chunk_root->commit_root);
2020                 info->chunk_root->node = NULL;
2021                 info->chunk_root->commit_root = NULL;
2022         }
2023 }
2024 
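/*
 * free all of the fs roots at unmount time: first everything on the
 * dead_roots list, then whatever is still cached in the
 * fs_roots_radix tree.
 */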
2025 static void del_fs_roots(struct btrfs_fs_info *fs_info)
2026 {
2027         int ret;
2028         struct btrfs_root *gang[8];
2029         int i;
2030 
2031         while (!list_empty(&fs_info->dead_roots)) {
2032                 gang[0] = list_entry(fs_info->dead_roots.next,
2033                                      struct btrfs_root, root_list);
2034                 list_del(&gang[0]->root_list);
2035 
2036                 if (gang[0]->in_radix) {
2037                         btrfs_free_fs_root(fs_info, gang[0]);
2038                 } else {
2039                         free_extent_buffer(gang[0]->node);
2040                         free_extent_buffer(gang[0]->commit_root);
2041                         kfree(gang[0]);
2042                 }
2043         }
2044 
2045         while (1) {
2046                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2047                                              (void **)gang, 0,
2048                                              ARRAY_SIZE(gang));
2049                 if (!ret)
2050                         break;
2051                 for (i = 0; i < ret; i++)
2052                         btrfs_free_fs_root(fs_info, gang[i]);
2053         }
2054 }
2055 
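/*
 * open_ctree is the main mount path.  It allocates the in-memory roots
 * and fs_info state, reads and validates the super block, starts the
 * worker thread pools, reads the chunk tree and the tree root (falling
 * back to the backup roots if that fails), sets up the remaining tree
 * roots, and finally starts the cleaner/transaction kthreads and log
 * replay.
 */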
2056 int open_ctree(struct super_block *sb,
2057                struct btrfs_fs_devices *fs_devices,
2058                char *options)
2059 {
2060         u32 sectorsize;
2061         u32 nodesize;
2062         u32 leafsize;
2063         u32 blocksize;
2064         u32 stripesize;
2065         u64 generation;
2066         u64 features;
2067         struct btrfs_key location;
2068         struct buffer_head *bh;
2069         struct btrfs_super_block *disk_super;
2070         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2071         struct btrfs_root *tree_root;
2072         struct btrfs_root *extent_root;
2073         struct btrfs_root *csum_root;
2074         struct btrfs_root *chunk_root;
2075         struct btrfs_root *dev_root;
2076         struct btrfs_root *quota_root;
2077         struct btrfs_root *log_tree_root;
2078         int ret;
2079         int err = -EINVAL;
2080         int num_backups_tried = 0;
2081         int backup_index = 0;
2082 
2083         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2084         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
2085         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
2086         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2087         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
2088         quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
2089 
2090         if (!tree_root || !extent_root || !csum_root ||
2091             !chunk_root || !dev_root || !quota_root) {
2092                 err = -ENOMEM;
2093                 goto fail;
2094         }
2095 
2096         ret = init_srcu_struct(&fs_info->subvol_srcu);
2097         if (ret) {
2098                 err = ret;
2099                 goto fail;
2100         }
2101 
2102         ret = setup_bdi(fs_info, &fs_info->bdi);
2103         if (ret) {
2104                 err = ret;
2105                 goto fail_srcu;
2106         }
2107 
2108         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2109         if (ret) {
2110                 err = ret;
2111                 goto fail_bdi;
2112         }
2113         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2114                                         (1 + ilog2(nr_cpu_ids));
2115 
2116         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2117         if (ret) {
2118                 err = ret;
2119                 goto fail_dirty_metadata_bytes;
2120         }
2121 
2122         fs_info->btree_inode = new_inode(sb);
2123         if (!fs_info->btree_inode) {
2124                 err = -ENOMEM;
2125                 goto fail_delalloc_bytes;
2126         }
2127 
2128         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2129 
2130         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2131         INIT_LIST_HEAD(&fs_info->trans_list);
2132         INIT_LIST_HEAD(&fs_info->dead_roots);
2133         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2134         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2135         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2136         spin_lock_init(&fs_info->delalloc_lock);
2137         spin_lock_init(&fs_info->trans_lock);
2138         spin_lock_init(&fs_info->fs_roots_radix_lock);
2139         spin_lock_init(&fs_info->delayed_iput_lock);
2140         spin_lock_init(&fs_info->defrag_inodes_lock);
2141         spin_lock_init(&fs_info->free_chunk_lock);
2142         spin_lock_init(&fs_info->tree_mod_seq_lock);
2143         spin_lock_init(&fs_info->super_lock);
2144         rwlock_init(&fs_info->tree_mod_log_lock);
2145         mutex_init(&fs_info->reloc_mutex);
2146         seqlock_init(&fs_info->profiles_lock);
2147 
2148         init_completion(&fs_info->kobj_unregister);
2149         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2150         INIT_LIST_HEAD(&fs_info->space_info);
2151         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2152         btrfs_mapping_init(&fs_info->mapping_tree);
2153         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2154                              BTRFS_BLOCK_RSV_GLOBAL);
2155         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2156                              BTRFS_BLOCK_RSV_DELALLOC);
2157         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2158         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2159         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2160         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2161                              BTRFS_BLOCK_RSV_DELOPS);
2162         atomic_set(&fs_info->nr_async_submits, 0);
2163         atomic_set(&fs_info->async_delalloc_pages, 0);
2164         atomic_set(&fs_info->async_submit_draining, 0);
2165         atomic_set(&fs_info->nr_async_bios, 0);
2166         atomic_set(&fs_info->defrag_running, 0);
2167         atomic64_set(&fs_info->tree_mod_seq, 0);
2168         fs_info->sb = sb;
2169         fs_info->max_inline = 8192 * 1024;
2170         fs_info->metadata_ratio = 0;
2171         fs_info->defrag_inodes = RB_ROOT;
2172         fs_info->trans_no_join = 0;
2173         fs_info->free_chunk_space = 0;
2174         fs_info->tree_mod_log = RB_ROOT;
2175 
2176         /* readahead state */
2177         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2178         spin_lock_init(&fs_info->reada_lock);
2179 
2180         fs_info->thread_pool_size = min_t(unsigned long,
2181                                           num_online_cpus() + 2, 8);
2182 
2183         INIT_LIST_HEAD(&fs_info->ordered_extents);
2184         spin_lock_init(&fs_info->ordered_extent_lock);
2185         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2186                                         GFP_NOFS);
2187         if (!fs_info->delayed_root) {
2188                 err = -ENOMEM;
2189                 goto fail_iput;
2190         }
2191         btrfs_init_delayed_root(fs_info->delayed_root);
2192 
2193         mutex_init(&fs_info->scrub_lock);
2194         atomic_set(&fs_info->scrubs_running, 0);
2195         atomic_set(&fs_info->scrub_pause_req, 0);
2196         atomic_set(&fs_info->scrubs_paused, 0);
2197         atomic_set(&fs_info->scrub_cancel_req, 0);
2198         init_waitqueue_head(&fs_info->scrub_pause_wait);
2199         init_rwsem(&fs_info->scrub_super_lock);
2200         fs_info->scrub_workers_refcnt = 0;
2201 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2202         fs_info->check_integrity_print_mask = 0;
2203 #endif
2204 
2205         spin_lock_init(&fs_info->balance_lock);
2206         mutex_init(&fs_info->balance_mutex);
2207         atomic_set(&fs_info->balance_running, 0);
2208         atomic_set(&fs_info->balance_pause_req, 0);
2209         atomic_set(&fs_info->balance_cancel_req, 0);
2210         fs_info->balance_ctl = NULL;
2211         init_waitqueue_head(&fs_info->balance_wait_q);
2212 
2213         sb->s_blocksize = 4096;
2214         sb->s_blocksize_bits = blksize_bits(4096);
2215         sb->s_bdi = &fs_info->bdi;
2216 
2217         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2218         set_nlink(fs_info->btree_inode, 1);
2219         /*
2220          * we set the i_size on the btree inode to the max possible int.
2221          * the real end of the address space is determined by all of
2222          * the devices in the system
2223          */
2224         fs_info->btree_inode->i_size = OFFSET_MAX;
2225         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2226         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2227 
2228         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2229         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2230                              fs_info->btree_inode->i_mapping);
2231         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2232         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2233 
2234         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2235 
2236         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2237         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2238                sizeof(struct btrfs_key));
2239         set_bit(BTRFS_INODE_DUMMY,
2240                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2241         insert_inode_hash(fs_info->btree_inode);
2242 
2243         spin_lock_init(&fs_info->block_group_cache_lock);
2244         fs_info->block_group_cache_tree = RB_ROOT;
2245         fs_info->first_logical_byte = (u64)-1;
2246 
2247         extent_io_tree_init(&fs_info->freed_extents[0],
2248                              fs_info->btree_inode->i_mapping);
2249         extent_io_tree_init(&fs_info->freed_extents[1],
2250                              fs_info->btree_inode->i_mapping);
2251         fs_info->pinned_extents = &fs_info->freed_extents[0];
2252         fs_info->do_barriers = 1;
2253 
2254 
2255         mutex_init(&fs_info->ordered_operations_mutex);
2256         mutex_init(&fs_info->tree_log_mutex);
2257         mutex_init(&fs_info->chunk_mutex);
2258         mutex_init(&fs_info->transaction_kthread_mutex);
2259         mutex_init(&fs_info->cleaner_mutex);
2260         mutex_init(&fs_info->volume_mutex);
2261         init_rwsem(&fs_info->extent_commit_sem);
2262         init_rwsem(&fs_info->cleanup_work_sem);
2263         init_rwsem(&fs_info->subvol_sem);
2264         fs_info->dev_replace.lock_owner = 0;
2265         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2266         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2267         mutex_init(&fs_info->dev_replace.lock_management_lock);
2268         mutex_init(&fs_info->dev_replace.lock);
2269 
2270         spin_lock_init(&fs_info->qgroup_lock);
2271         mutex_init(&fs_info->qgroup_ioctl_lock);
2272         fs_info->qgroup_tree = RB_ROOT;
2273         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2274         fs_info->qgroup_seq = 1;
2275         fs_info->quota_enabled = 0;
2276         fs_info->pending_quota_state = 0;
2277         mutex_init(&fs_info->qgroup_rescan_lock);
2278 
2279         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2280         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2281 
2282         init_waitqueue_head(&fs_info->transaction_throttle);
2283         init_waitqueue_head(&fs_info->transaction_wait);
2284         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2285         init_waitqueue_head(&fs_info->async_submit_wait);
2286 
2287         ret = btrfs_alloc_stripe_hash_table(fs_info);
2288         if (ret) {
2289                 err = ret;
2290                 goto fail_alloc;
2291         }
2292 
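        /*
         * the tree root is set up with placeholder 4k sizes here; the
         * real node/leaf/sector/stripe sizes are copied in from the
         * super block once it has been read and validated below.
         */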
2293         __setup_root(4096, 4096, 4096, 4096, tree_root,
2294                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2295 
2296         invalidate_bdev(fs_devices->latest_bdev);
2297 
2298         /*
2299          * Read super block and check the signature bytes only
2300          */
2301         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2302         if (!bh) {
2303                 err = -EINVAL;
2304                 goto fail_alloc;
2305         }
2306 
2307         /*
2308          * We want to check superblock checksum, the type is stored inside.
2309          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2310          */
2311         if (btrfs_check_super_csum(bh->b_data)) {
2312                 printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
2313                 err = -EINVAL;
2314                 goto fail_alloc;
2315         }
2316 
2317         /*
2318          * super_copy is zeroed at allocation time and we never touch the
2319          * following bytes up to INFO_SIZE, the checksum is calculated from
2320          * the whole block of INFO_SIZE
2321          */
2322         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2323         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2324                sizeof(*fs_info->super_for_commit));
2325         brelse(bh);
2326 
2327         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2328 
2329         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2330         if (ret) {
2331                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2332                 err = -EINVAL;
2333                 goto fail_alloc;
2334         }
2335 
2336         disk_super = fs_info->super_copy;
2337         if (!btrfs_super_root(disk_super))
2338                 goto fail_alloc;
2339 
2340         /* check FS state, whether FS is broken. */
2341         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2342                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2343 
2344         /*
2345          * run through our array of backup supers and setup
2346          * our ring pointer to the oldest one
2347          */
2348         generation = btrfs_super_generation(disk_super);
2349         find_oldest_super_backup(fs_info, generation);
2350 
2351         /*
2352          * In the long term, we'll store the compression type in the super
2353          * block, and it'll be used for per file compression control.
2354          */
2355         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2356 
2357         ret = btrfs_parse_options(tree_root, options);
2358         if (ret) {
2359                 err = ret;
2360                 goto fail_alloc;
2361         }
2362 
2363         features = btrfs_super_incompat_flags(disk_super) &
2364                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2365         if (features) {
2366                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2367                        "unsupported optional features (%Lx).\n",
2368                        (unsigned long long)features);
2369                 err = -EINVAL;
2370                 goto fail_alloc;
2371         }
2372 
2373         if (btrfs_super_leafsize(disk_super) !=
2374             btrfs_super_nodesize(disk_super)) {
2375                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2376                        "blocksizes don't match.  node %d leaf %d\n",
2377                        btrfs_super_nodesize(disk_super),
2378                        btrfs_super_leafsize(disk_super));
2379                 err = -EINVAL;
2380                 goto fail_alloc;
2381         }
2382         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2383                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2384                        "blocksize (%d) was too large\n",
2385                        btrfs_super_leafsize(disk_super));
2386                 err = -EINVAL;
2387                 goto fail_alloc;
2388         }
2389 
2390         features = btrfs_super_incompat_flags(disk_super);
2391         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2392         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2393                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2394 
2395         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2396                 printk(KERN_ERR "btrfs: has skinny extents\n");
2397 
2398         /*
2399          * flag our filesystem as having big metadata blocks if
2400          * they are bigger than the page size
2401          */
2402         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2403                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2404                         printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2405                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2406         }
2407 
2408         nodesize = btrfs_super_nodesize(disk_super);
2409         leafsize = btrfs_super_leafsize(disk_super);
2410         sectorsize = btrfs_super_sectorsize(disk_super);
2411         stripesize = btrfs_super_stripesize(disk_super);
2412         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2413         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2414 
2415         /*
2416          * mixed block groups end up with duplicate but slightly offset
2417          * extent buffers for the same range.  It leads to corruptions
2418          */
2419         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2420             (sectorsize != leafsize)) {
2421                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2422                                 "are not allowed for mixed block groups on %s\n",
2423                                 sb->s_id);
2424                 goto fail_alloc;
2425         }
2426 
2427         /*
2428          * Needn't use the lock because there is no other task which will
2429          * update the flag.
2430          */
2431         btrfs_set_super_incompat_flags(disk_super, features);
2432 
2433         features = btrfs_super_compat_ro_flags(disk_super) &
2434                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2435         if (!(sb->s_flags & MS_RDONLY) && features) {
2436                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2437                        "unsupported option features (%Lx).\n",
2438                        (unsigned long long)features);
2439                 err = -EINVAL;
2440                 brelse(bh);
2441                 goto fail_alloc;
2442         }
2443 
2444         btrfs_init_workers(&fs_info->generic_worker,
2445                            "genwork", 1, NULL);
2446 
2447         btrfs_init_workers(&fs_info->workers, "worker",
2448                            fs_info->thread_pool_size,
2449                            &fs_info->generic_worker);
2450 
2451         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2452                            fs_info->thread_pool_size,
2453                            &fs_info->generic_worker);
2454 
2455         btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2456                            fs_info->thread_pool_size,
2457                            &fs_info->generic_worker);
2458 
2459         btrfs_init_workers(&fs_info->submit_workers, "submit",
2460                            min_t(u64, fs_devices->num_devices,
2461                            fs_info->thread_pool_size),
2462                            &fs_info->generic_worker);
2463 
2464         btrfs_init_workers(&fs_info->caching_workers, "cache",
2465                            2, &fs_info->generic_worker);
2466 
2467         /* a higher idle thresh on the submit workers makes it much more
2468          * likely that bios will be sent down in a sane order to the
2469          * devices
2470          */
2471         fs_info->submit_workers.idle_thresh = 64;
2472 
2473         fs_info->workers.idle_thresh = 16;
2474         fs_info->workers.ordered = 1;
2475 
2476         fs_info->delalloc_workers.idle_thresh = 2;
2477         fs_info->delalloc_workers.ordered = 1;
2478 
2479         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2480                            &fs_info->generic_worker);
2481         btrfs_init_workers(&fs_info->endio_workers, "endio",
2482                            fs_info->thread_pool_size,
2483                            &fs_info->generic_worker);
2484         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2485                            fs_info->thread_pool_size,
2486                            &fs_info->generic_worker);
2487         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2488                            "endio-meta-write", fs_info->thread_pool_size,
2489                            &fs_info->generic_worker);
2490         btrfs_init_workers(&fs_info->endio_raid56_workers,
2491                            "endio-raid56", fs_info->thread_pool_size,
2492                            &fs_info->generic_worker);
2493         btrfs_init_workers(&fs_info->rmw_workers,
2494                            "rmw", fs_info->thread_pool_size,
2495                            &fs_info->generic_worker);
2496         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2497                            fs_info->thread_pool_size,
2498                            &fs_info->generic_worker);
2499         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2500                            1, &fs_info->generic_worker);
2501         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2502                            fs_info->thread_pool_size,
2503                            &fs_info->generic_worker);
2504         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2505                            fs_info->thread_pool_size,
2506                            &fs_info->generic_worker);
2507         btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2508                            &fs_info->generic_worker);
2509 
2510         /*
2511          * endios are largely parallel and should have a very
2512          * low idle thresh
2513          */
2514         fs_info->endio_workers.idle_thresh = 4;
2515         fs_info->endio_meta_workers.idle_thresh = 4;
2516         fs_info->endio_raid56_workers.idle_thresh = 4;
2517         fs_info->rmw_workers.idle_thresh = 2;
2518 
2519         fs_info->endio_write_workers.idle_thresh = 2;
2520         fs_info->endio_meta_write_workers.idle_thresh = 2;
2521         fs_info->readahead_workers.idle_thresh = 2;
2522 
2523         /*
2524          * btrfs_start_workers can really only fail because of ENOMEM so just
2525          * return -ENOMEM if any of these fail.
2526          */
2527         ret = btrfs_start_workers(&fs_info->workers);
2528         ret |= btrfs_start_workers(&fs_info->generic_worker);
2529         ret |= btrfs_start_workers(&fs_info->submit_workers);
2530         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2531         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2532         ret |= btrfs_start_workers(&fs_info->endio_workers);
2533         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2534         ret |= btrfs_start_workers(&fs_info->rmw_workers);
2535         ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2536         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2537         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2538         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2539         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2540         ret |= btrfs_start_workers(&fs_info->caching_workers);
2541         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2542         ret |= btrfs_start_workers(&fs_info->flush_workers);
2543         ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2544         if (ret) {
2545                 err = -ENOMEM;
2546                 goto fail_sb_buffer;
2547         }
2548 
2549         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2550         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2551                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2552 
2553         tree_root->nodesize = nodesize;
2554         tree_root->leafsize = leafsize;
2555         tree_root->sectorsize = sectorsize;
2556         tree_root->stripesize = stripesize;
2557 
2558         sb->s_blocksize = sectorsize;
2559         sb->s_blocksize_bits = blksize_bits(sectorsize);
2560 
2561         if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2562                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2563                 goto fail_sb_buffer;
2564         }
2565 
2566         if (sectorsize != PAGE_SIZE) {
2567                 printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
2568                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2569                 goto fail_sb_buffer;
2570         }
2571 
2572         mutex_lock(&fs_info->chunk_mutex);
2573         ret = btrfs_read_sys_array(tree_root);
2574         mutex_unlock(&fs_info->chunk_mutex);
2575         if (ret) {
2576                 printk(KERN_WARNING "btrfs: failed to read the system "
2577                        "array on %s\n", sb->s_id);
2578                 goto fail_sb_buffer;
2579         }
2580 
2581         blocksize = btrfs_level_size(tree_root,
2582                                      btrfs_super_chunk_root_level(disk_super));
2583         generation = btrfs_super_chunk_root_generation(disk_super);
2584 
2585         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2586                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2587 
2588         chunk_root->node = read_tree_block(chunk_root,
2589                                            btrfs_super_chunk_root(disk_super),
2590                                            blocksize, generation);
2591         if (!chunk_root->node ||
2592             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2593                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2594                        sb->s_id);
2595                 goto fail_tree_roots;
2596         }
2597         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2598         chunk_root->commit_root = btrfs_root_node(chunk_root);
2599 
2600         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2601            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2602            BTRFS_UUID_SIZE);
2603 
2604         ret = btrfs_read_chunk_tree(chunk_root);
2605         if (ret) {
2606                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2607                        sb->s_id);
2608                 goto fail_tree_roots;
2609         }
2610 
2611         /*
2612          * keep the device that is marked to be the target device for the
2613          * dev_replace procedure
2614          */
2615         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2616 
2617         if (!fs_devices->latest_bdev) {
2618                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2619                        sb->s_id);
2620                 goto fail_tree_roots;
2621         }
2622 
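        /*
         * tree root reads are retried from this label; when the read
         * fails and recovery is possible, the recovery path below uses
         * next_root_backup() (above) to point the super copy at an
         * older set of root pointers before jumping back here.
         */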
2623 retry_root_backup:
2624         blocksize = btrfs_level_size(tree_root,
2625                                      btrfs_super_root_level(disk_super));
2626         generation = btrfs_super_generation(disk_super);
2627 
2628         tree_root->node = read_tree_block(tree_root,
2629                                           btrfs_super_root(disk_super),
2630                                           blocksize, generation);
2631         if (!tree_root->node ||
2632             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2633                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2634                        sb->s_id);
2635 
2636                 goto recovery_tree_root;
2637         }
2638 
2639         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2640         tree_root->commit_root = btrfs_root_node(tree_root);
2641 
2642         ret = find_and_setup_root(tree_root, fs_info,
2643                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2644         if (ret)
2645                 goto recovery_tree_root;
2646         extent_root->track_dirty = 1;
2647 
2648         ret = find_and_setup_root(tree_root, fs_info,
2649                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2650         if (ret)
2651                 goto recovery_tree_root;
2652         dev_root->track_dirty = 1;
2653 
2654         ret = find_and_setup_root(tree_root, fs_info,
2655                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2656         if (ret)
2657                 goto recovery_tree_root;
2658         csum_root->track_dirty = 1;
2659 
2660         ret = find_and_setup_root(tree_root, fs_info,
2661                                   BTRFS_QUOTA_TREE_OBJECTID, quota_root);
2662         if (ret) {
2663                 kfree(quota_root);
2664                 quota_root = fs_info->quota_root = NULL;
2665         } else {
2666                 quota_root->track_dirty = 1;
2667                 fs_info->quota_enabled = 1;
2668                 fs_info->pending_quota_state = 1;
2669         }
2670 
2671         fs_info->generation = generation;
2672         fs_info->last_trans_committed = generation;
2673 
2674         ret = btrfs_recover_balance(fs_info);
2675         if (ret) {
2676                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2677                 goto fail_block_groups;
2678         }
2679 
2680         ret = btrfs_init_dev_stats(fs_info);
2681         if (ret) {
2682                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2683                        ret);
2684                 goto fail_block_groups;
2685         }
2686 
2687         ret = btrfs_init_dev_replace(fs_info);
2688         if (ret) {
2689                 pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2690                 goto fail_block_groups;
2691         }
2692 
2693         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2694 
2695         ret = btrfs_init_space_info(fs_info);
2696         if (ret) {
2697                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2698                 goto fail_block_groups;
2699         }
2700 
2701         ret = btrfs_read_block_groups(extent_root);
2702         if (ret) {
2703                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2704                 goto fail_block_groups;
2705         }
2706         fs_info->num_tolerated_disk_barrier_failures =
2707                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2708         if (fs_info->fs_devices->missing_devices >
2709              fs_info->num_tolerated_disk_barrier_failures &&
2710             !(sb->s_flags & MS_RDONLY)) {
2711                 printk(KERN_WARNING
2712                        "Btrfs: too many missing devices, writeable mount is not allowed\n");
2713                 goto fail_block_groups;
2714         }
2715 
2716         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2717                                                "btrfs-cleaner");
2718         if (IS_ERR(fs_info->cleaner_kthread))
2719                 goto fail_block_groups;
2720 
2721         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2722                                                    tree_root,
2723                                                    "btrfs-transaction");
2724         if (IS_ERR(fs_info->transaction_kthread))
2725                 goto fail_cleaner;
2726 
2727         if (!btrfs_test_opt(tree_root, SSD) &&
2728             !btrfs_test_opt(tree_root, NOSSD) &&
2729             !fs_info->fs_devices->rotating) {
2730                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2731                        "mode\n");
2732                 btrfs_set_opt(fs_info->mount_opt, SSD);
2733         }
2734 
2735 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2736         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2737                 ret = btrfsic_mount(tree_root, fs_devices,
2738                                     btrfs_test_opt(tree_root,
2739                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2740                                     1 : 0,
2741                                     fs_info->check_integrity_print_mask);
2742                 if (ret)
2743                         printk(KERN_WARNING "btrfs: failed to initialize"
2744                                " integrity check module %s\n", sb->s_id);
2745         }
2746 #endif
2747         ret = btrfs_read_qgroup_config(fs_info);
2748         if (ret)
2749                 goto fail_trans_kthread;
2750 
2751         /* do not make disk changes in broken FS */
2752         if (btrfs_super_log_root(disk_super) != 0) {
2753                 u64 bytenr = btrfs_super_log_root(disk_super);
2754 
2755                 if (fs_devices->rw_devices == 0) {
2756                         printk(KERN_WARNING "Btrfs log replay required "
2757                                "on RO media\n");
2758                         err = -EIO;
2759                         goto fail_qgroup;
2760                 }
2761                 blocksize =
2762                      btrfs_level_size(tree_root,
2763                                       btrfs_super_log_root_level(disk_super));
2764 
2765                 log_tree_root = btrfs_alloc_root(fs_info);
2766                 if (!log_tree_root) {
2767                         err = -ENOMEM;
2768                         goto fail_qgroup;
2769                 }
2770 
2771                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2772                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2773 
2774                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2775                                                       blocksize,
2776                                                       generation + 1);
2777                 if (!log_tree_root->node ||
2778                     !extent_buffer_uptodate(log_tree_root->node)) {
2779                         printk(KERN_ERR "btrfs: failed to read log tree\n");
2780                         free_extent_buffer(log_tree_root->node);
2781                         kfree(log_tree_root);
2782                         goto fail_trans_kthread;
2783                 }
2784                 /* returns with log_tree_root freed on success */
2785                 ret = btrfs_recover_log_trees(log_tree_root);
2786                 if (ret) {
2787                         btrfs_error(tree_root->fs_info, ret,
2788                                     "Failed to recover log tree");
2789                         free_extent_buffer(log_tree_root->node);
2790                         kfree(log_tree_root);
2791                         goto fail_trans_kthread;
2792                 }
2793 
2794                 if (sb->s_flags & MS_RDONLY) {
2795                         ret = btrfs_commit_super(tree_root);
2796                         if (ret)
2797                                 goto fail_trans_kthread;
2798                 }
2799         }
2800 
2801         ret = btrfs_find_orphan_roots(tree_root);
2802         if (ret)
2803                 goto fail_trans_kthread;
2804 
2805         if (!(sb->s_flags & MS_RDONLY)) {
2806                 ret = btrfs_cleanup_fs_roots(fs_info);
2807                 if (ret)
2808                         goto fail_trans_kthread;
2809 
2810                 ret = btrfs_recover_relocation(tree_root);
2811                 if (ret < 0) {
2812                         printk(KERN_WARNING
2813                                "btrfs: failed to recover relocation\n");
2814                         err = -EINVAL;
2815                         goto fail_qgroup;
2816                 }
2817         }
2818 
2819         location.objectid = BTRFS_FS_TREE_OBJECTID;
2820         location.type = BTRFS_ROOT_ITEM_KEY;
2821         location.offset = (u64)-1;
2822 
2823         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2824         if (!fs_info->fs_root)
2825                 goto fail_qgroup;
2826         if (IS_ERR(fs_info->fs_root)) {
2827                 err = PTR_ERR(fs_info->fs_root);
2828                 goto fail_qgroup;
2829         }
2830 
2831         if (sb->s_flags & MS_RDONLY)
2832                 return 0;
2833 
2834         down_read(&fs_info->cleanup_work_sem);
2835         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2836             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2837                 up_read(&fs_info->cleanup_work_sem);
2838                 close_ctree(tree_root);
2839                 return ret;
2840         }
2841         up_read(&fs_info->cleanup_work_sem);
2842 
2843         ret = btrfs_resume_balance_async(fs_info);
2844         if (ret) {
2845                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2846                 close_ctree(tree_root);
2847                 return ret;
2848         }
2849 
2850         ret = btrfs_resume_dev_replace_async(fs_info);
2851         if (ret) {
2852                 pr_warn("btrfs: failed to resume dev_replace\n");
2853                 close_ctree(tree_root);
2854                 return ret;
2855         }
2856 
2857         return 0;
2858 
2859 fail_qgroup:
2860         btrfs_free_qgroup_config(fs_info);
2861 fail_trans_kthread:
2862         kthread_stop(fs_info->transaction_kthread);
2863         btrfs_cleanup_transaction(fs_info->tree_root);
2864         del_fs_roots(fs_info);
2865 fail_cleaner:
2866         kthread_stop(fs_info->cleaner_kthread);
2867 
2868         /*
2869          * make sure we're done with the btree inode before we stop our
2870          * kthreads
2871          */
2872         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2873 
2874 fail_block_groups:
2875         btrfs_put_block_group_cache(fs_info);
2876         btrfs_free_block_groups(fs_info);
2877 
2878 fail_tree_roots:
2879         free_root_pointers(fs_info, 1);
2880         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2881 
2882 fail_sb_buffer:
2883         btrfs_stop_all_workers(fs_info);
2884 fail_alloc:
2885 fail_iput:
2886         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2887 
2888         iput(fs_info->btree_inode);
2889 fail_delalloc_bytes:
2890         percpu_counter_destroy(&fs_info->delalloc_bytes);
2891 fail_dirty_metadata_bytes:
2892         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2893 fail_bdi:
2894         bdi_destroy(&fs_info->bdi);
2895 fail_srcu:
2896         cleanup_srcu_struct(&fs_info->subvol_srcu);
2897 fail:
2898         btrfs_free_stripe_hash_table(fs_info);
2899         btrfs_close_devices(fs_info->fs_devices);
2900         return err;
2901 
2902 recovery_tree_root:
2903         if (!btrfs_test_opt(tree_root, RECOVERY))
2904                 goto fail_tree_roots;
2905 
2906         free_root_pointers(fs_info, 0);
2907 
2908         /* don't use the log in recovery mode, it won't be valid */
2909         btrfs_set_super_log_root(disk_super, 0);
2910 
2911         /* we can't trust the free space cache either */
2912         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2913 
2914         ret = next_root_backup(fs_info, fs_info->super_copy,
2915                                &num_backups_tried, &backup_index);
2916         if (ret == -1)
2917                 goto fail_block_groups;
2918         goto retry_root_backup;
2919 }
2920 
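     /*
      * Completion callback for super block buffer_head writes.  On success the
      * buffer is marked uptodate; on failure we print a rate-limited warning,
      * clear the uptodate bit and bump the device's write-error statistic.
      * Either way the buffer is unlocked and its reference dropped.
      */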
2921 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2922 {
2923         if (uptodate) {
2924                 set_buffer_uptodate(bh);
2925         } else {
2926                 struct btrfs_device *device = (struct btrfs_device *)
2927                         bh->b_private;
2928 
2929                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2930                                           "I/O error on %s\n",
2931                                           rcu_str_deref(device->name));
2932                 /* note, we don't set_buffer_write_io_error because we have
2933                  * our own ways of dealing with the IO errors
2934                  */
2935                 clear_buffer_uptodate(bh);
2936                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2937         }
2938         unlock_buffer(bh);
2939         put_bh(bh);
2940 }
2941 
2942 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2943 {
2944         struct buffer_head *bh;
2945         struct buffer_head *latest = NULL;
2946         struct btrfs_super_block *super;
2947         int i;
2948         u64 transid = 0;
2949         u64 bytenr;
2950 
2951         /* we would like to check all the supers, but that would make
2952          * a btrfs mount succeed after a mkfs from a different FS.
2953          * So, we need to add a special mount option to scan for
2954          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2955          */
2956         for (i = 0; i < 1; i++) {
2957                 bytenr = btrfs_sb_offset(i);
2958                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2959                         break;
2960                 bh = __bread(bdev, bytenr / 4096, 4096);
2961                 if (!bh)
2962                         continue;
2963 
2964                 super = (struct btrfs_super_block *)bh->b_data;
2965                 if (btrfs_super_bytenr(super) != bytenr ||
2966                     super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2967                         brelse(bh);
2968                         continue;
2969                 }
2970 
2971                 if (!latest || btrfs_super_generation(super) > transid) {
2972                         brelse(latest);
2973                         latest = bh;
2974                         transid = btrfs_super_generation(super);
2975                 } else {
2976                         brelse(bh);
2977                 }
2978         }
2979         return latest;
2980 }
2981 
2982 /*
2983  * this should be called twice, once with wait == 0 and
2984  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2985  * we write are pinned.
2986  *
2987  * They are released when wait == 1 is done.
2988  * max_mirrors must be the same for both runs, and it indicates how
2989  * many supers on this one device should be written.
2990  *
2991  * max_mirrors == 0 means to write them all.
2992  */
2993 static int write_dev_supers(struct btrfs_device *device,
2994                             struct btrfs_super_block *sb,
2995                             int do_barriers, int wait, int max_mirrors)
2996 {
2997         struct buffer_head *bh;
2998         int i;
2999         int ret;
3000         int errors = 0;
3001         u32 crc;
3002         u64 bytenr;
3003 
3004         if (max_mirrors == 0)
3005                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3006 
3007         for (i = 0; i < max_mirrors; i++) {
3008                 bytenr = btrfs_sb_offset(i);
3009                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3010                         break;
3011 
3012                 if (wait) {
3013                         bh = __find_get_block(device->bdev, bytenr / 4096,
3014                                               BTRFS_SUPER_INFO_SIZE);
3015                         if (!bh) {
3016                                 errors++;
3017                                 continue;
3018                         }
3019                         wait_on_buffer(bh);
3020                         if (!buffer_uptodate(bh))
3021                                 errors++;
3022 
3023                         /* drop our reference */
3024                         brelse(bh);
3025 
3026                         /* drop the reference from the wait == 0 run */
3027                         brelse(bh);
3028                         continue;
3029                 } else {
3030                         btrfs_set_super_bytenr(sb, bytenr);
3031 
3032                         crc = ~(u32)0;
3033                         crc = btrfs_csum_data((char *)sb +
3034                                               BTRFS_CSUM_SIZE, crc,
3035                                               BTRFS_SUPER_INFO_SIZE -
3036                                               BTRFS_CSUM_SIZE);
3037                         btrfs_csum_final(crc, sb->csum);
3038 
3039                         /*
3040                          * one reference for us, and we leave it for the
3041                          * caller
3042                          */
3043                         bh = __getblk(device->bdev, bytenr / 4096,
3044                                       BTRFS_SUPER_INFO_SIZE);
3045                         if (!bh) {
3046                                 printk(KERN_ERR "btrfs: couldn't get super "
3047                                        "buffer head for bytenr %Lu\n", bytenr);
3048                                 errors++;
3049                                 continue;
3050                         }
3051 
3052                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3053 
3054                         /* one reference for submit_bh */
3055                         get_bh(bh);
3056 
3057                         set_buffer_uptodate(bh);
3058                         lock_buffer(bh);
3059                         bh->b_end_io = btrfs_end_buffer_write_sync;
3060                         bh->b_private = device;
3061                 }
3062 
3063                 /*
3064                  * we FUA the first super.  The others we allow
3065                  * to go down lazily.
3066                  */
3067                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
3068                 if (ret)
3069                         errors++;
3070         }
3071         return errors < i ? 0 : -1;
3072 }
3073 
3074 /*
3075  * endio for the write_dev_flush; this will wake anyone waiting
3076  * for the barrier when it is done
3077  */
3078 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3079 {
3080         if (err) {
3081                 if (err == -EOPNOTSUPP)
3082                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3083                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3084         }
3085         if (bio->bi_private)
3086                 complete(bio->bi_private);
3087         bio_put(bio);
3088 }
3089 
3090 /*
3091  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
3092  * are sent down.  With wait == 1, it waits for the previous flush to finish.
3093  *
3094  * any device where the flush fails with EOPNOTSUPP is flagged as not
3095  * barrier-capable
3096  */
3097 static int write_dev_flush(struct btrfs_device *device, int wait)
3098 {
3099         struct bio *bio;
3100         int ret = 0;
3101 
3102         if (device->nobarriers)
3103                 return 0;
3104 
3105         if (wait) {
3106                 bio = device->flush_bio;
3107                 if (!bio)
3108                         return 0;
3109 
3110                 wait_for_completion(&device->flush_wait);
3111 
3112                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3113                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3114                                       rcu_str_deref(device->name));
3115                         device->nobarriers = 1;
3116                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3117                         ret = -EIO;
3118                         btrfs_dev_stat_inc_and_print(device,
3119                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3120                 }
3121 
3122                 /* drop the reference from the wait == 0 run */
3123                 bio_put(bio);
3124                 device->flush_bio = NULL;
3125 
3126                 return ret;
3127         }
3128 
3129         /*
3130          * one reference for us, and we leave it for the
3131          * caller
3132          */
3133         device->flush_bio = NULL;
3134         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3135         if (!bio)
3136                 return -ENOMEM;
3137 
3138         bio->bi_end_io = btrfs_end_empty_barrier;
3139         bio->bi_bdev = device->bdev;
3140         init_completion(&device->flush_wait);
3141         bio->bi_private = &device->flush_wait;
3142         device->flush_bio = bio;
3143 
3144         bio_get(bio);
3145         btrfsic_submit_bio(WRITE_FLUSH, bio);
3146 
3147         return 0;
3148 }
3149 
3150 /*
3151  * send an empty flush down to each device in parallel,
3152  * then wait for them
3153  */
3154 static int barrier_all_devices(struct btrfs_fs_info *info)
3155 {
3156         struct list_head *head;
3157         struct btrfs_device *dev;
3158         int errors_send = 0;
3159         int errors_wait = 0;
3160         int ret;
3161 
3162         /* send down all the barriers */
3163         head = &info->fs_devices->devices;
3164         list_for_each_entry_rcu(dev, head, dev_list) {
3165                 if (dev->missing)
3166                         continue;
3167                 if (!dev->bdev) {
3168                         errors_send++;
3169                         continue;
3170                 }
3171                 if (!dev->in_fs_metadata || !dev->writeable)
3172                         continue;
3173 
3174                 ret = write_dev_flush(dev, 0);
3175                 if (ret)
3176                         errors_send++;
3177         }
3178 
3179         /* wait for all the barriers */
3180         list_for_each_entry_rcu(dev, head, dev_list) {
3181                 if (dev->missing)
3182                         continue;
3183                 if (!dev->bdev) {
3184                         errors_wait++;
3185                         continue;
3186                 }
3187                 if (!dev->in_fs_metadata || !dev->writeable)
3188                         continue;
3189 
3190                 ret = write_dev_flush(dev, 1);
3191                 if (ret)
3192                         errors_wait++;
3193         }
3194         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3195             errors_wait > info->num_tolerated_disk_barrier_failures)
3196                 return -EIO;
3197         return 0;
3198 }
3199 
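     /*
      * Work out how many devices may fail their barrier/flush writes before a
      * super block commit can no longer be trusted.  Start from the device
      * count and walk every populated space_info, lowering the value according
      * to the RAID profile of each block group (the per-profile rules are
      * spelled out in the comment inside the loop).  The mount path stores the
      * result in fs_info->num_tolerated_disk_barrier_failures.
      */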
3200 int btrfs_calc_num_tolerated_disk_barrier_failures(
3201         struct btrfs_fs_info *fs_info)
3202 {
3203         struct btrfs_ioctl_space_info space;
3204         struct btrfs_space_info *sinfo;
3205         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3206                        BTRFS_BLOCK_GROUP_SYSTEM,
3207                        BTRFS_BLOCK_GROUP_METADATA,
3208                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3209         int num_types = 4;
3210         int i;
3211         int c;
3212         int num_tolerated_disk_barrier_failures =
3213                 (int)fs_info->fs_devices->num_devices;
3214 
3215         for (i = 0; i < num_types; i++) {
3216                 struct btrfs_space_info *tmp;
3217 
3218                 sinfo = NULL;
3219                 rcu_read_lock();
3220                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3221                         if (tmp->flags == types[i]) {
3222                                 sinfo = tmp;
3223                                 break;
3224                         }
3225                 }
3226                 rcu_read_unlock();
3227 
3228                 if (!sinfo)
3229                         continue;
3230 
3231                 down_read(&sinfo->groups_sem);
3232                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3233                         if (!list_empty(&sinfo->block_groups[c])) {
3234                                 u64 flags;
3235 
3236                                 btrfs_get_block_group_info(
3237                                         &sinfo->block_groups[c], &space);
3238                                 if (space.total_bytes == 0 ||
3239                                     space.used_bytes == 0)
3240                                         continue;
3241                                 flags = space.flags;
3242                                 /*
3243                                  * return
3244                                  * 0: if dup, single or RAID0 is configured for
3245                                  *    any of metadata, system or data, else
3246                                  * 1: if RAID5 is configured, or if RAID1 or
3247                                  *    RAID10 is configured and only two mirrors
3248                                  *    are used, else
3249                                  * 2: if RAID6 is configured, else
3250                                  * num_mirrors - 1: if RAID1 or RAID10 is
3251                                  *                  configured and more than
3252                                  *                  2 mirrors are used.
3253                                  */
3254                                 if (num_tolerated_disk_barrier_failures > 0 &&
3255                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3256                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3257                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3258                                       == 0)))
3259                                         num_tolerated_disk_barrier_failures = 0;
3260                                 else if (num_tolerated_disk_barrier_failures > 1) {
3261                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3262                                             BTRFS_BLOCK_GROUP_RAID5 |
3263                                             BTRFS_BLOCK_GROUP_RAID10)) {
3264                                                 num_tolerated_disk_barrier_failures = 1;
3265                                         } else if (flags &
3266                                                    BTRFS_BLOCK_GROUP_RAID6) {
3267                                                 num_tolerated_disk_barrier_failures = 2;
3268                                         }
3269                                 }
3270                         }
3271                 }
3272                 up_read(&sinfo->groups_sem);
3273         }
3274 
3275         return num_tolerated_disk_barrier_failures;
3276 }
3277 
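     /*
      * Write the super block for the current commit to every writable device.
      * Unless barriers are disabled the devices are flushed first, then
      * write_dev_supers() runs twice per device: once with wait == 0 to submit
      * the buffers and once with wait == 1 to collect the results.  More than
      * max_errors failing devices turns the commit into an error.
      */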
3278 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3279 {
3280         struct list_head *head;
3281         struct btrfs_device *dev;
3282         struct btrfs_super_block *sb;
3283         struct btrfs_dev_item *dev_item;
3284         int ret;
3285         int do_barriers;
3286         int max_errors;
3287         int total_errors = 0;
3288         u64 flags;
3289 
3290         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3291         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3292         backup_super_roots(root->fs_info);
3293 
3294         sb = root->fs_info->super_for_commit;
3295         dev_item = &sb->dev_item;
3296 
3297         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3298         head = &root->fs_info->fs_devices->devices;
3299 
3300         if (do_barriers) {
3301                 ret = barrier_all_devices(root->fs_info);
3302                 if (ret) {
3303                         mutex_unlock(
3304                                 &root->fs_info->fs_devices->device_list_mutex);
3305                         btrfs_error(root->fs_info, ret,
3306                                     "errors while submitting device barriers.");
3307                         return ret;
3308                 }
3309         }
3310 
3311         list_for_each_entry_rcu(dev, head, dev_list) {
3312                 if (!dev->bdev) {
3313                         total_errors++;
3314                         continue;
3315                 }
3316                 if (!dev->in_fs_metadata || !dev->writeable)
3317                         continue;
3318 
3319                 btrfs_set_stack_device_generation(dev_item, 0);
3320                 btrfs_set_stack_device_type(dev_item, dev->type);
3321                 btrfs_set_stack_device_id(dev_item, dev->devid);
3322                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3323                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3324                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3325                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3326                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3327                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3328                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3329 
3330                 flags = btrfs_super_flags(sb);
3331                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3332 
3333                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3334                 if (ret)
3335                         total_errors++;
3336         }
3337         if (total_errors > max_errors) {
3338                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3339                        total_errors);
3340 
3341                 /* This shouldn't happen. FUA is masked off if unsupported */
3342                 BUG();
3343         }
3344 
3345         total_errors = 0;
3346         list_for_each_entry_rcu(dev, head, dev_list) {
3347                 if (!dev->bdev)
3348                         continue;
3349                 if (!dev->in_fs_metadata || !dev->writeable)
3350                         continue;
3351 
3352                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3353                 if (ret)
3354                         total_errors++;
3355         }
3356         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3357         if (total_errors > max_errors) {
3358                 btrfs_error(root->fs_info, -EIO,
3359                             "%d errors while writing supers", total_errors);
3360                 return -EIO;
3361         }
3362         return 0;
3363 }
3364 
3365 int write_ctree_super(struct btrfs_trans_handle *trans,
3366                       struct btrfs_root *root, int max_mirrors)
3367 {
3368         int ret;
3369 
3370         ret = write_all_supers(root, max_mirrors);
3371         return ret;
3372 }
3373 
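     /*
      * Drop a cached subvolume root: remove it from the fs_roots radix tree,
      * wait out SRCU readers if the root has no references left on disk, free
      * its log trees when the fs is in an error state, and release the
      * per-root free-inode caches along with the root itself.
      */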
3374 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3375 {
3376         spin_lock(&fs_info->fs_roots_radix_lock);
3377         radix_tree_delete(&fs_info->fs_roots_radix,
3378                           (unsigned long)root->root_key.objectid);
3379         spin_unlock(&fs_info->fs_roots_radix_lock);
3380 
3381         if (btrfs_root_refs(&root->root_item) == 0)
3382                 synchronize_srcu(&fs_info->subvol_srcu);
3383 
3384         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3385                 btrfs_free_log(NULL, root);
3386                 btrfs_free_log_root_tree(NULL, fs_info);
3387         }
3388 
3389         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3390         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3391         free_fs_root(root);
3392 }
3393 
3394 static void free_fs_root(struct btrfs_root *root)
3395 {
3396         iput(root->cache_inode);
3397         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3398         if (root->anon_dev)
3399                 free_anon_bdev(root->anon_dev);
3400         free_extent_buffer(root->node);
3401         free_extent_buffer(root->commit_root);
3402         kfree(root->free_ino_ctl);
3403         kfree(root->free_ino_pinned);
3404         kfree(root->name);
3405         kfree(root);
3406 }
3407 
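     /*
      * Run orphan cleanup on every subvolume root currently cached in the
      * fs_roots radix tree.  Roots are found in batches of eight via
      * radix_tree_gang_lookup() and the first cleanup error aborts the walk.
      */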
3408 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3409 {
3410         u64 root_objectid = 0;
3411         struct btrfs_root *gang[8];
3412         int i;
3413         int ret;
3414 
3415         while (1) {
3416                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3417                                              (void **)gang, root_objectid,
3418                                              ARRAY_SIZE(gang));
3419                 if (!ret)
3420                         break;
3421 
3422                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3423                 for (i = 0; i < ret; i++) {
3424                         int err;
3425 
3426                         root_objectid = gang[i]->root_key.objectid;
3427                         err = btrfs_orphan_cleanup(gang[i]);
3428                         if (err)
3429                                 return err;
3430                 }
3431                 root_objectid++;
3432         }
3433         return 0;
3434 }
3435 
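     /*
      * Quiesce the filesystem for unmount or remount read-only: run the
      * delayed iputs, wait for any cleanup work in flight, commit the running
      * transaction twice (the second commit drops the snapshot left by the
      * first) and finally write the super blocks out to all devices.
      */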
3436 int btrfs_commit_super(struct btrfs_root *root)
3437 {
3438         struct btrfs_trans_handle *trans;
3439         int ret;
3440 
3441         mutex_lock(&root->fs_info->cleaner_mutex);
3442         btrfs_run_delayed_iputs(root);
3443         mutex_unlock(&root->fs_info->cleaner_mutex);
3444         wake_up_process(root->fs_info->cleaner_kthread);
3445 
3446         /* wait until ongoing cleanup work is done */
3447         down_write(&root->fs_info->cleanup_work_sem);
3448         up_write(&root->fs_info->cleanup_work_sem);
3449 
3450         trans = btrfs_join_transaction(root);
3451         if (IS_ERR(trans))
3452                 return PTR_ERR(trans);
3453         ret = btrfs_commit_transaction(trans, root);
3454         if (ret)
3455                 return ret;
3456         /* run commit again to drop the original snapshot */
3457         trans = btrfs_join_transaction(root);
3458         if (IS_ERR(trans))
3459                 return PTR_ERR(trans);
3460         ret = btrfs_commit_transaction(trans, root);
3461         if (ret)
3462                 return ret;
3463         ret = btrfs_write_and_wait_transaction(NULL, root);
3464         if (ret) {
3465                 btrfs_error(root->fs_info, ret,
3466                             "Failed to sync btree inode to disk.");
3467                 return ret;
3468         }
3469 
3470         ret = write_ctree_super(NULL, root, 0);
3471         return ret;
3472 }
3473 
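     /*
      * Unmount-time teardown: pause balance and dev_replace, cancel scrub,
      * wait for defraggers, commit the super on read-write mounts, stop the
      * cleaner and transaction kthreads, free block groups and fs roots, drop
      * the btree inode and release the devices and per-fs counters set up at
      * mount time.
      */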
3474 int close_ctree(struct btrfs_root *root)
3475 {
3476         struct btrfs_fs_info *fs_info = root->fs_info;
3477         int ret;
3478 
3479         fs_info->closing = 1;
3480         smp_mb();
3481 
3482         /* pause restriper - we want to resume on mount */
3483         btrfs_pause_balance(fs_info);
3484 
3485         btrfs_dev_replace_suspend_for_unmount(fs_info);
3486 
3487         btrfs_scrub_cancel(fs_info);
3488 
3489         /* wait for any defraggers to finish */
3490         wait_event(fs_info->transaction_wait,
3491                    (atomic_read(&fs_info->defrag_running) == 0));
3492 
3493         /* clear out the rbtree of defraggable inodes */
3494         btrfs_cleanup_defrag_inodes(fs_info);
3495 
3496         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3497                 ret = btrfs_commit_super(root);
3498                 if (ret)
3499                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3500         }
3501 
3502         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3503                 btrfs_error_commit_super(root);
3504 
3505         btrfs_put_block_group_cache(fs_info);
3506 
3507         kthread_stop(fs_info->transaction_kthread);
3508         kthread_stop(fs_info->cleaner_kthread);
3509 
3510         fs_info->closing = 2;
3511         smp_mb();
3512 
3513         btrfs_free_qgroup_config(root->fs_info);
3514 
3515         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3516                 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3517                        percpu_counter_sum(&fs_info->delalloc_bytes));
3518         }
3519 
3520         btrfs_free_block_groups(fs_info);
3521 
3522         /*
3523          * we must make sure there are no read requests left to
3524          * submit after we stop all workers.
3525          */
3526         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3527         btrfs_stop_all_workers(fs_info);
3528 
3529         del_fs_roots(fs_info);
3530 
3531         free_root_pointers(fs_info, 1);
3532 
3533         iput(fs_info->btree_inode);
3534 
3535 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3536         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3537                 btrfsic_unmount(root, fs_info->fs_devices);
3538 #endif
3539 
3540         btrfs_close_devices(fs_info->fs_devices);
3541         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3542 
3543         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3544         percpu_counter_destroy(&fs_info->delalloc_bytes);
3545         bdi_destroy(&fs_info->bdi);
3546         cleanup_srcu_struct(&fs_info->subvol_srcu);
3547 
3548         btrfs_free_stripe_hash_table(fs_info);
3549 
3550         return 0;
3551 }
3552 
3553 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3554                           int atomic)
3555 {
3556         int ret;
3557         struct inode *btree_inode = buf->pages[0]->mapping->host;
3558 
3559         ret = extent_buffer_uptodate(buf);
3560         if (!ret)
3561                 return ret;
3562 
3563         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3564                                     parent_transid, atomic);
3565         if (ret == -EAGAIN)
3566                 return ret;
3567         return !ret;
3568 }
3569 
3570 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3571 {
3572         return set_extent_buffer_uptodate(buf);
3573 }
3574 
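     /*
      * Mark a tree block dirty.  The buffer must be tree-locked; a header
      * generation that does not match the running transaction triggers a
      * WARN, and dirty_metadata_bytes is only bumped when the buffer was not
      * already dirty.
      */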
3575 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3576 {
3577         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3578         u64 transid = btrfs_header_generation(buf);
3579         int was_dirty;
3580 
3581         btrfs_assert_tree_locked(buf);
3582         if (transid != root->fs_info->generation)
3583                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3584                        "found %llu running %llu\n",
3585                         (unsigned long long)buf->start,
3586                         (unsigned long long)transid,
3587                         (unsigned long long)root->fs_info->generation);
3588         was_dirty = set_extent_buffer_dirty(buf);
3589         if (!was_dirty)
3590                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3591                                      buf->len,
3592                                      root->fs_info->dirty_metadata_batch);
3593 }
3594 
3595 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3596                                         int flush_delayed)
3597 {
3598         /*
3599          * looks as though older kernels can get into trouble with
3600          * this code; they end up stuck in balance_dirty_pages forever
3601          */
3602         int ret;
3603 
3604         if (current->flags & PF_MEMALLOC)
3605                 return;
3606 
3607         if (flush_delayed)
3608                 btrfs_balance_delayed_items(root);
3609 
3610         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3611                                      BTRFS_DIRTY_METADATA_THRESH);
3612         if (ret > 0) {
3613                 balance_dirty_pages_ratelimited(
3614                                    root->fs_info->btree_inode->i_mapping);
3615         }
3616         return;
3617 }
3618 
3619 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3620 {
3621         __btrfs_btree_balance_dirty(root, 1);
3622 }
3623 
3624 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3625 {
3626         __btrfs_btree_balance_dirty(root, 0);
3627 }
3628 
3629 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3630 {
3631         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3632         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3633 }
3634 
3635 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3636                               int read_only)
3637 {
3638         /*
3639          * Placeholder for checks
3640          */
3641         return 0;
3642 }
3643 
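     /*
      * Variant of btrfs_commit_super() used when the fs is already in an
      * error state: flush the delayed iputs, wait for cleanup work and then
      * tear down the open transactions instead of committing them.
      */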
3644 static void btrfs_error_commit_super(struct btrfs_root *root)
3645 {
3646         mutex_lock(&root->fs_info->cleaner_mutex);
3647         btrfs_run_delayed_iputs(root);
3648         mutex_unlock(&root->fs_info->cleaner_mutex);
3649 
3650         down_write(&root->fs_info->cleanup_work_sem);
3651         up_write(&root->fs_info->cleanup_work_sem);
3652 
3653         /* cleanup FS via transaction */
3654         btrfs_cleanup_transaction(root);
3655 }
3656 
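     /*
      * Error-path helper: splice the transaction's ordered_operations list and
      * invalidate the inodes on it so nothing keeps referencing the aborted
      * transaction.
      */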
3657 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3658                                              struct btrfs_root *root)
3659 {
3660         struct btrfs_inode *btrfs_inode;
3661         struct list_head splice;
3662 
3663         INIT_LIST_HEAD(&splice);
3664 
3665         mutex_lock(&root->fs_info->ordered_operations_mutex);
3666         spin_lock(&root->fs_info->ordered_extent_lock);
3667 
3668         list_splice_init(&t->ordered_operations, &splice);
3669         while (!list_empty(&splice)) {
3670                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3671                                          ordered_operations);
3672 
3673                 list_del_init(&btrfs_inode->ordered_operations);
3674                 spin_unlock(&root->fs_info->ordered_extent_lock);
3675 
3676                 btrfs_invalidate_inodes(btrfs_inode->root);
3677 
3678                 spin_lock(&root->fs_info->ordered_extent_lock);
3679         }
3680 
3681         spin_unlock(&root->fs_info->ordered_extent_lock);
3682         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3683 }
3684 
3685 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3686 {
3687         struct btrfs_ordered_extent *ordered;
3688 
3689         spin_lock(&root->fs_info->ordered_extent_lock);
3690         /*
3691          * This just short-circuits the ordered completion path, which makes
3692          * sure the ordered extent gets properly cleaned up.
3693          */
3694         list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3695                             root_extent_list)
3696                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3697         spin_unlock(&root->fs_info->ordered_extent_lock);
3698 }
3699 
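     /*
      * Error-path helper: empty the delayed-ref rbtree of an aborted
      * transaction.  Heads whose mutex is contended are waited for before the
      * walk continues, and heads that still had a reserved extent get that
      * extent pinned so the space can be reclaimed.
      */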
3700 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3701                                struct btrfs_root *root)
3702 {
3703         struct rb_node *node;
3704         struct btrfs_delayed_ref_root *delayed_refs;
3705         struct btrfs_delayed_ref_node *ref;
3706         int ret = 0;
3707 
3708         delayed_refs = &trans->delayed_refs;
3709 
3710         spin_lock(&delayed_refs->lock);
3711         if (delayed_refs->num_entries == 0) {
3712                 spin_unlock(&delayed_refs->lock);
3713                 printk(KERN_INFO "delayed_refs has NO entry\n");
3714                 return ret;
3715         }
3716 
3717         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3718                 struct btrfs_delayed_ref_head *head = NULL;
3719 
3720                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3721                 atomic_set(&ref->refs, 1);
3722                 if (btrfs_delayed_ref_is_head(ref)) {
3723 
3724                         head = btrfs_delayed_node_to_head(ref);
3725                         if (!mutex_trylock(&head->mutex)) {
3726                                 atomic_inc(&ref->refs);
3727                                 spin_unlock(&delayed_refs->lock);
3728 
3729                                 /* Need to wait for the delayed ref to run */
3730                                 mutex_lock(&head->mutex);
3731                                 mutex_unlock(&head->mutex);
3732                                 btrfs_put_delayed_ref(ref);
3733 
3734                                 spin_lock(&delayed_refs->lock);
3735                                 continue;
3736                         }
3737 
3738                         if (head->must_insert_reserved)
3739                                 btrfs_pin_extent(root, ref->bytenr,
3740                                                  ref->num_bytes, 1);
3741                         btrfs_free_delayed_extent_op(head->extent_op);
3742                         delayed_refs->num_heads--;
3743                         if (list_empty(&head->cluster))
3744                                 delayed_refs->num_heads_ready--;
3745                         list_del_init(&head->cluster);
3746                 }
3747 
3748                 ref->in_tree = 0;
3749                 rb_erase(&ref->rb_node, &delayed_refs->root);
3750                 delayed_refs->num_entries--;
3751                 if (head)
3752                         mutex_unlock(&head->mutex);
3753                 spin_unlock(&delayed_refs->lock);
3754                 btrfs_put_delayed_ref(ref);
3755 
3756                 cond_resched();
3757                 spin_lock(&delayed_refs->lock);
3758         }
3759 
3760         spin_unlock(&delayed_refs->lock);
3761 
3762         return ret;
3763 }
3764 
3765 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3766 {
3767         struct btrfs_pending_snapshot *snapshot;
3768         struct list_head splice;
3769 
3770         INIT_LIST_HEAD(&splice);
3771 
3772         list_splice_init(&t->pending_snapshots, &splice);
3773 
3774         while (!list_empty(&splice)) {
3775                 snapshot = list_entry(splice.next,
3776                                       struct btrfs_pending_snapshot,
3777                                       list);
3778                 snapshot->error = -ECANCELED;
3779                 list_del_init(&snapshot->list);
3780         }
3781 }
3782 
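     /*
      * Error-path helper: take every inode off the fs-wide delalloc list and
      * invalidate it so outstanding dirty data does not pin the aborted
      * transaction.
      */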
3783 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3784 {
3785         struct btrfs_inode *btrfs_inode;
3786         struct list_head splice;
3787 
3788         INIT_LIST_HEAD(&splice);
3789 
3790         spin_lock(&root->fs_info->delalloc_lock);
3791         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3792 
3793         while (!list_empty(&splice)) {
3794                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3795                                     delalloc_inodes);
3796 
3797                 list_del_init(&btrfs_inode->delalloc_inodes);
3798                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3799                           &btrfs_inode->runtime_flags);
3800                 spin_unlock(&root->fs_info->delalloc_lock);
3801 
3802                 btrfs_invalidate_inodes(btrfs_inode->root);
3803 
3804                 spin_lock(&root->fs_info->delalloc_lock);
3805         }
3806 
3807         spin_unlock(&root->fs_info->delalloc_lock);
3808 }
3809 
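     /*
      * Error-path helper: clear the given mark (typically EXTENT_DIRTY) from a
      * transaction's dirty_pages tree and discard the corresponding extent
      * buffers without writing them back.
      */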
3810 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3811                                         struct extent_io_tree *dirty_pages,
3812                                         int mark)
3813 {
3814         int ret;
3815         struct extent_buffer *eb;
3816         u64 start = 0;
3817         u64 end;
3818 
3819         while (1) {
3820                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3821                                             mark, NULL);
3822                 if (ret)
3823                         break;
3824 
3825                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3826                 while (start <= end) {
3827                         eb = btrfs_find_tree_block(root, start,
3828                                                    root->leafsize);
3829                         start += root->leafsize;
3830                         if (!eb)
3831                                 continue;
3832                         wait_on_extent_buffer_writeback(eb);
3833 
3834                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3835                                                &eb->bflags))
3836                                 clear_extent_buffer_dirty(eb);
3837                         free_extent_buffer_stale(eb);
3838                 }
3839         }
3840 
3841         return ret;
3842 }
3843 
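     /*
      * Error-path helper: walk both freed_extents trees and unpin every range
      * still marked EXTENT_DIRTY, handing the space back to the allocator.
      */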
3844 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3845                                        struct extent_io_tree *pinned_extents)
3846 {
3847         struct extent_io_tree *unpin;
3848         u64 start;
3849         u64 end;
3850         int ret;
3851         bool loop = true;
3852 
3853         unpin = pinned_extents;
3854 again:
3855         while (1) {
3856                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3857                                             EXTENT_DIRTY, NULL);
3858                 if (ret)
3859                         break;
3860 
3861                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3862                 btrfs_error_unpin_extent_range(root, start, end);
3863                 cond_resched();
3864         }
3865 
3866         if (loop) {
3867                 if (unpin == &root->fs_info->freed_extents[0])
3868                         unpin = &root->fs_info->freed_extents[1];
3869                 else
3870                         unpin = &root->fs_info->freed_extents[0];
3871                 loop = false;
3872                 goto again;
3873         }
3874 
3875         return 0;
3876 }
3877 
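     /*
      * Abort a single transaction: drop its delayed refs and block
      * reservation, wake anyone waiting on the commit, evict pending
      * snapshots and discard the dirty and pinned extents it accumulated.
      */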
3878 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3879                                    struct btrfs_root *root)
3880 {
3881         btrfs_destroy_delayed_refs(cur_trans, root);
3882         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3883                                 cur_trans->dirty_pages.dirty_bytes);
3884 
3885         /* FIXME: cleanup wait for commit */
3886         cur_trans->in_commit = 1;
3887         cur_trans->blocked = 1;
3888         wake_up(&root->fs_info->transaction_blocked_wait);
3889 
3890         btrfs_evict_pending_snapshots(cur_trans);
3891 
3892         cur_trans->blocked = 0;
3893         wake_up(&root->fs_info->transaction_wait);
3894 
3895         cur_trans->commit_done = 1;
3896         wake_up(&cur_trans->commit_wait);
3897 
3898         btrfs_destroy_delayed_inodes(root);
3899         btrfs_assert_delayed_root_empty(root);
3900 
3901         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3902                                      EXTENT_DIRTY);
3903         btrfs_destroy_pinned_extent(root,
3904                                     root->fs_info->pinned_extents);
3905 
3906         /*
3907         memset(cur_trans, 0, sizeof(*cur_trans));
3908         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3909         */
3910 }
3911 
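     /*
      * Abort every transaction still on fs_info->trans_list.  Each one goes
      * through essentially the same steps as btrfs_cleanup_one_transaction(),
      * plus ordered extent and delalloc inode cleanup, before being removed
      * from the list and freed.  trans_no_join is held across the walk so no
      * new transaction can be started in the meantime.
      */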
3912 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3913 {
3914         struct btrfs_transaction *t;
3915         LIST_HEAD(list);
3916 
3917         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3918 
3919         spin_lock(&root->fs_info->trans_lock);
3920         list_splice_init(&root->fs_info->trans_list, &list);
3921         root->fs_info->trans_no_join = 1;
3922         spin_unlock(&root->fs_info->trans_lock);
3923 
3924         while (!list_empty(&list)) {
3925                 t = list_entry(list.next, struct btrfs_transaction, list);
3926 
3927                 btrfs_destroy_ordered_operations(t, root);
3928 
3929                 btrfs_destroy_ordered_extents(root);
3930 
3931                 btrfs_destroy_delayed_refs(t, root);
3932 
3933                 /* FIXME: cleanup wait for commit */
3934                 t->in_commit = 1;
3935                 t->blocked = 1;
3936                 smp_mb();
3937                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3938                         wake_up(&root->fs_info->transaction_blocked_wait);
3939 
3940                 btrfs_evict_pending_snapshots(t);
3941 
3942                 t->blocked = 0;
3943                 smp_mb();
3944                 if (waitqueue_active(&root->fs_info->transaction_wait))
3945                         wake_up(&root->fs_info->transaction_wait);
3946 
3947                 t->commit_done = 1;
3948                 smp_mb();
3949                 if (waitqueue_active(&t->commit_wait))
3950                         wake_up(&t->commit_wait);
3951 
3952                 btrfs_destroy_delayed_inodes(root);
3953                 btrfs_assert_delayed_root_empty(root);
3954 
3955                 btrfs_destroy_delalloc_inodes(root);
3956 
3957                 spin_lock(&root->fs_info->trans_lock);
3958                 root->fs_info->running_transaction = NULL;
3959                 spin_unlock(&root->fs_info->trans_lock);
3960 
3961                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3962                                              EXTENT_DIRTY);
3963 
3964                 btrfs_destroy_pinned_extent(root,
3965                                             root->fs_info->pinned_extents);
3966 
3967                 atomic_set(&t->use_count, 0);
3968                 list_del_init(&t->list);
3969                 memset(t, 0, sizeof(*t));
3970                 kmem_cache_free(btrfs_transaction_cachep, t);
3971         }
3972 
3973         spin_lock(&root->fs_info->trans_lock);
3974         root->fs_info->trans_no_join = 0;
3975         spin_unlock(&root->fs_info->trans_lock);
3976         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3977 
3978         return 0;
3979 }
3980 
3981 static struct extent_io_ops btree_extent_io_ops = {
3982         .readpage_end_io_hook = btree_readpage_end_io_hook,
3983         .readpage_io_failed_hook = btree_io_failed_hook,
3984         .submit_bio_hook = btree_submit_bio_hook,
3985         /* note we're sharing with inode.c for the merge bio hook */
3986         .merge_bio_hook = btrfs_merge_bio_hook,
3987 };
3988 
