
TOMOYO Linux Cross Reference
Linux/fs/super.c

  1 /*
  2  *  linux/fs/super.c
  3  *
  4  *  Copyright (C) 1991, 1992  Linus Torvalds
  5  *
  6  *  super.c contains code to handle: - mount structures
  7  *                                   - super-block tables
  8  *                                   - filesystem drivers list
  9  *                                   - mount system call
 10  *                                   - umount system call
 11  *                                   - ustat system call
 12  *
 13  * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 14  *
 15  *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 16  *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 17  *  Added options to /proc/mounts:
 18  *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 19  *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 20  *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 21  */
 22 
 23 #include <linux/export.h>
 24 #include <linux/slab.h>
 25 #include <linux/blkdev.h>
 26 #include <linux/mount.h>
 27 #include <linux/security.h>
 28 #include <linux/writeback.h>            /* for the emergency remount stuff */
 29 #include <linux/idr.h>
 30 #include <linux/mutex.h>
 31 #include <linux/backing-dev.h>
 32 #include <linux/rculist_bl.h>
 33 #include <linux/cleancache.h>
 34 #include <linux/fsnotify.h>
 35 #include <linux/lockdep.h>
 36 #include "internal.h"
 37 
 38 
 39 LIST_HEAD(super_blocks);
 40 DEFINE_SPINLOCK(sb_lock);
 41 
 42 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 43         "sb_writers",
 44         "sb_pagefaults",
 45         "sb_internal",
 46 };
 47 
 48 /*
 49  * One thing we have to be careful of with a per-sb shrinker is that we don't
 50  * drop the last active reference to the superblock from within the shrinker.
 51  * If that happens we could trigger unregistering the shrinker from within the
 52  * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 53  * take a passive reference to the superblock to avoid this from occurring.
 54  */
 55 static unsigned long super_cache_scan(struct shrinker *shrink,
 56                                       struct shrink_control *sc)
 57 {
 58         struct super_block *sb;
 59         long    fs_objects = 0;
 60         long    total_objects;
 61         long    freed = 0;
 62         long    dentries;
 63         long    inodes;
 64 
 65         sb = container_of(shrink, struct super_block, s_shrink);
 66 
 67         /*
 68          * Deadlock avoidance.  We may hold various FS locks, and we don't want
 69          * to recurse into the FS that called us in clear_inode() and friends..
 70          */
 71         if (!(sc->gfp_mask & __GFP_FS))
 72                 return SHRINK_STOP;
 73 
 74         if (!grab_super_passive(sb))
 75                 return SHRINK_STOP;
 76 
 77         if (sb->s_op->nr_cached_objects)
 78                 fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);
 79 
 80         inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
 81         dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
 82         total_objects = dentries + inodes + fs_objects + 1;
 83         if (!total_objects)
 84                 total_objects = 1;
 85 
 86         /* proportion the scan between the caches */
 87         dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
 88         inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
 89 
 90         /*
 91          * prune the dcache first as the icache is pinned by it, then
 92          * prune the icache, followed by the filesystem specific caches
 93          */
 94         freed = prune_dcache_sb(sb, dentries, sc->nid);
 95         freed += prune_icache_sb(sb, inodes, sc->nid);
 96 
 97         if (fs_objects) {
 98                 fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
 99                                                                 total_objects);
100                 freed += sb->s_op->free_cached_objects(sb, fs_objects,
101                                                        sc->nid);
102         }
103 
104         drop_super(sb);
105         return freed;
106 }
107 
108 static unsigned long super_cache_count(struct shrinker *shrink,
109                                        struct shrink_control *sc)
110 {
111         struct super_block *sb;
112         long    total_objects = 0;
113 
114         sb = container_of(shrink, struct super_block, s_shrink);
115 
116         /*
117          * Don't call grab_super_passive as it is a potential
118          * scalability bottleneck. The counts could get updated
119          * between super_cache_count and super_cache_scan anyway.
120          * Call to super_cache_count with shrinker_rwsem held
121          * ensures the safety of call to list_lru_count_node() and
122          * s_op->nr_cached_objects().
123          */
124         if (sb->s_op && sb->s_op->nr_cached_objects)
125                 total_objects = sb->s_op->nr_cached_objects(sb,
126                                                  sc->nid);
127 
128         total_objects += list_lru_count_node(&sb->s_dentry_lru,
129                                                  sc->nid);
130         total_objects += list_lru_count_node(&sb->s_inode_lru,
131                                                  sc->nid);
132 
133         total_objects = vfs_pressure_ratio(total_objects);
134         return total_objects;
135 }
136 
137 /**
138  *      destroy_super   -       frees a superblock
139  *      @s: superblock to free
140  *
141  *      Frees a superblock.
142  */
143 static void destroy_super(struct super_block *s)
144 {
145         int i;
146         list_lru_destroy(&s->s_dentry_lru);
147         list_lru_destroy(&s->s_inode_lru);
148         for (i = 0; i < SB_FREEZE_LEVELS; i++)
149                 percpu_counter_destroy(&s->s_writers.counter[i]);
150         security_sb_free(s);
151         WARN_ON(!list_empty(&s->s_mounts));
152         kfree(s->s_subtype);
153         kfree(s->s_options);
154         kfree_rcu(s, rcu);
155 }
156 
157 /**
158  *      alloc_super     -       create new superblock
159  *      @type:  filesystem type superblock should belong to
160  *      @flags: the mount flags
161  *
162  *      Allocates and initializes a new &struct super_block.  alloc_super()
 163  *      returns a pointer to the new superblock or %NULL if allocation fails.
164  */
165 static struct super_block *alloc_super(struct file_system_type *type, int flags)
166 {
167         struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
168         static const struct super_operations default_op;
169         int i;
170 
171         if (!s)
172                 return NULL;
173 
174         INIT_LIST_HEAD(&s->s_mounts);
175 
176         if (security_sb_alloc(s))
177                 goto fail;
178 
179         for (i = 0; i < SB_FREEZE_LEVELS; i++) {
180                 if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
181                         goto fail;
182                 lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
183                                  &type->s_writers_key[i], 0);
184         }
185         init_waitqueue_head(&s->s_writers.wait);
186         init_waitqueue_head(&s->s_writers.wait_unfrozen);
187         s->s_flags = flags;
188         s->s_bdi = &default_backing_dev_info;
189         INIT_HLIST_NODE(&s->s_instances);
190         INIT_HLIST_BL_HEAD(&s->s_anon);
191         INIT_LIST_HEAD(&s->s_inodes);
192 
193         if (list_lru_init(&s->s_dentry_lru))
194                 goto fail;
195         if (list_lru_init(&s->s_inode_lru))
196                 goto fail;
197 
198         init_rwsem(&s->s_umount);
199         lockdep_set_class(&s->s_umount, &type->s_umount_key);
200         /*
201          * sget() can have s_umount recursion.
202          *
203          * When it cannot find a suitable sb, it allocates a new
204          * one (this one), and tries again to find a suitable old
205          * one.
206          *
207          * In case that succeeds, it will acquire the s_umount
 208          * lock of the old one. Since these are clearly distinct
209          * locks, and this object isn't exposed yet, there's no
210          * risk of deadlocks.
211          *
212          * Annotate this by putting this lock in a different
213          * subclass.
214          */
215         down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
216         s->s_count = 1;
217         atomic_set(&s->s_active, 1);
218         mutex_init(&s->s_vfs_rename_mutex);
219         lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
220         mutex_init(&s->s_dquot.dqio_mutex);
221         mutex_init(&s->s_dquot.dqonoff_mutex);
222         s->s_maxbytes = MAX_NON_LFS;
223         s->s_op = &default_op;
224         s->s_time_gran = 1000000000;
225         s->cleancache_poolid = -1;
226 
227         s->s_shrink.seeks = DEFAULT_SEEKS;
228         s->s_shrink.scan_objects = super_cache_scan;
229         s->s_shrink.count_objects = super_cache_count;
230         s->s_shrink.batch = 1024;
231         s->s_shrink.flags = SHRINKER_NUMA_AWARE;
232         return s;
233 
234 fail:
235         destroy_super(s);
236         return NULL;
237 }
238 
239 /* Superblock refcounting  */
240 
241 /*
242  * Drop a superblock's refcount.  The caller must hold sb_lock.
243  */
244 static void __put_super(struct super_block *sb)
245 {
246         if (!--sb->s_count) {
247                 list_del_init(&sb->s_list);
248                 destroy_super(sb);
249         }
250 }
251 
252 /**
253  *      put_super       -       drop a temporary reference to superblock
254  *      @sb: superblock in question
255  *
256  *      Drops a temporary reference, frees superblock if there's no
257  *      references left.
258  */
259 static void put_super(struct super_block *sb)
260 {
261         spin_lock(&sb_lock);
262         __put_super(sb);
263         spin_unlock(&sb_lock);
264 }
265 
266 
267 /**
268  *      deactivate_locked_super -       drop an active reference to superblock
269  *      @s: superblock to deactivate
270  *
 271  *      Drops an active reference to superblock, converting it into a temporary
 272  *      one if there are no other active references left.  In that case we
273  *      tell fs driver to shut it down and drop the temporary reference we
274  *      had just acquired.
275  *
276  *      Caller holds exclusive lock on superblock; that lock is released.
277  */
278 void deactivate_locked_super(struct super_block *s)
279 {
280         struct file_system_type *fs = s->s_type;
281         if (atomic_dec_and_test(&s->s_active)) {
282                 cleancache_invalidate_fs(s);
283                 unregister_shrinker(&s->s_shrink);
284                 fs->kill_sb(s);
285 
286                 put_filesystem(fs);
287                 put_super(s);
288         } else {
289                 up_write(&s->s_umount);
290         }
291 }
292 
293 EXPORT_SYMBOL(deactivate_locked_super);
294 
295 /**
296  *      deactivate_super        -       drop an active reference to superblock
297  *      @s: superblock to deactivate
298  *
299  *      Variant of deactivate_locked_super(), except that superblock is *not*
300  *      locked by caller.  If we are going to drop the final active reference,
301  *      lock will be acquired prior to that.
302  */
303 void deactivate_super(struct super_block *s)
304 {
305         if (!atomic_add_unless(&s->s_active, -1, 1)) {
306                 down_write(&s->s_umount);
307                 deactivate_locked_super(s);
308         }
309 }
310 
311 EXPORT_SYMBOL(deactivate_super);
312 
313 /**
314  *      grab_super - acquire an active reference
315  *      @s: reference we are trying to make active
316  *
317  *      Tries to acquire an active reference.  grab_super() is used when we
318  *      had just found a superblock in super_blocks or fs_type->fs_supers
319  *      and want to turn it into a full-blown active reference.  grab_super()
320  *      is called with sb_lock held and drops it.  Returns 1 in case of
 321  *      success, 0 if we failed (the superblock was already dead or
 322  *      dying when grab_super() was called).  Note that this is only
323  *      called for superblocks not in rundown mode (== ones still on ->fs_supers
324  *      of their type), so increment of ->s_count is OK here.
325  */
326 static int grab_super(struct super_block *s) __releases(sb_lock)
327 {
328         s->s_count++;
329         spin_unlock(&sb_lock);
330         down_write(&s->s_umount);
331         if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
332                 put_super(s);
333                 return 1;
334         }
335         up_write(&s->s_umount);
336         put_super(s);
337         return 0;
338 }
339 
340 /*
341  *      grab_super_passive - acquire a passive reference
342  *      @sb: reference we are trying to grab
343  *
344  *      Tries to acquire a passive reference. This is used in places where we
345  *      cannot take an active reference but we need to ensure that the
346  *      superblock does not go away while we are working on it. It returns
347  *      false if a reference was not gained, and returns true with the s_umount
348  *      lock held in read mode if a reference is gained. On successful return,
349  *      the caller must drop the s_umount lock and the passive reference when
350  *      done.
351  */
352 bool grab_super_passive(struct super_block *sb)
353 {
354         spin_lock(&sb_lock);
355         if (hlist_unhashed(&sb->s_instances)) {
356                 spin_unlock(&sb_lock);
357                 return false;
358         }
359 
360         sb->s_count++;
361         spin_unlock(&sb_lock);
362 
363         if (down_read_trylock(&sb->s_umount)) {
364                 if (sb->s_root && (sb->s_flags & MS_BORN))
365                         return true;
366                 up_read(&sb->s_umount);
367         }
368 
369         put_super(sb);
370         return false;
371 }
372 
373 /**
374  *      generic_shutdown_super  -       common helper for ->kill_sb()
375  *      @sb: superblock to kill
376  *
377  *      generic_shutdown_super() does all fs-independent work on superblock
378  *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
379  *      that need destruction out of superblock, call generic_shutdown_super()
380  *      and release aforementioned objects.  Note: dentries and inodes _are_
381  *      taken care of and do not need specific handling.
382  *
383  *      Upon calling this function, the filesystem may no longer alter or
384  *      rearrange the set of dentries belonging to this super_block, nor may it
385  *      change the attachments of dentries to inodes.
386  */
387 void generic_shutdown_super(struct super_block *sb)
388 {
389         const struct super_operations *sop = sb->s_op;
390 
391         if (sb->s_root) {
392                 shrink_dcache_for_umount(sb);
393                 sync_filesystem(sb);
394                 sb->s_flags &= ~MS_ACTIVE;
395 
396                 fsnotify_unmount_inodes(&sb->s_inodes);
397 
398                 evict_inodes(sb);
399 
400                 if (sb->s_dio_done_wq) {
401                         destroy_workqueue(sb->s_dio_done_wq);
402                         sb->s_dio_done_wq = NULL;
403                 }
404 
405                 if (sop->put_super)
406                         sop->put_super(sb);
407 
408                 if (!list_empty(&sb->s_inodes)) {
409                         printk("VFS: Busy inodes after unmount of %s. "
410                            "Self-destruct in 5 seconds.  Have a nice day...\n",
411                            sb->s_id);
412                 }
413         }
414         spin_lock(&sb_lock);
415         /* should be initialized for __put_super_and_need_restart() */
416         hlist_del_init(&sb->s_instances);
417         spin_unlock(&sb_lock);
418         up_write(&sb->s_umount);
419 }
420 
421 EXPORT_SYMBOL(generic_shutdown_super);
422 
423 /**
424  *      sget    -       find or create a superblock
425  *      @type:  filesystem type superblock should belong to
426  *      @test:  comparison callback
427  *      @set:   setup callback
428  *      @flags: mount flags
429  *      @data:  argument to each of them
430  */
431 struct super_block *sget(struct file_system_type *type,
432                         int (*test)(struct super_block *,void *),
433                         int (*set)(struct super_block *,void *),
434                         int flags,
435                         void *data)
436 {
437         struct super_block *s = NULL;
438         struct super_block *old;
439         int err;
440 
441 retry:
442         spin_lock(&sb_lock);
443         if (test) {
444                 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
445                         if (!test(old, data))
446                                 continue;
447                         if (!grab_super(old))
448                                 goto retry;
449                         if (s) {
450                                 up_write(&s->s_umount);
451                                 destroy_super(s);
452                                 s = NULL;
453                         }
454                         return old;
455                 }
456         }
457         if (!s) {
458                 spin_unlock(&sb_lock);
459                 s = alloc_super(type, flags);
460                 if (!s)
461                         return ERR_PTR(-ENOMEM);
462                 goto retry;
463         }
464                 
465         err = set(s, data);
466         if (err) {
467                 spin_unlock(&sb_lock);
468                 up_write(&s->s_umount);
469                 destroy_super(s);
470                 return ERR_PTR(err);
471         }
472         s->s_type = type;
473         strlcpy(s->s_id, type->name, sizeof(s->s_id));
474         list_add_tail(&s->s_list, &super_blocks);
475         hlist_add_head(&s->s_instances, &type->fs_supers);
476         spin_unlock(&sb_lock);
477         get_filesystem(type);
478         register_shrinker(&s->s_shrink);
479         return s;
480 }
481 
482 EXPORT_SYMBOL(sget);
483 
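/*
 * Example (editor's sketch, not part of fs/super.c): what a caller of sget()
 * is expected to do with the result.  On success the superblock comes back
 * with s_umount held for writing and one active reference; a NULL ->s_root
 * tells the caller it got a brand-new superblock that still has to be filled
 * in.  examplefs_fill_super() is a hypothetical helper; mount_nodev(),
 * mount_bdev() and mount_ns() below all follow this same pattern.
 *
 *	sb = sget(fs_type, test, set, flags, data);
 *	if (IS_ERR(sb))
 *		return ERR_CAST(sb);
 *	if (!sb->s_root) {
 *		err = examplefs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
 *		if (err) {
 *			deactivate_locked_super(sb);
 *			return ERR_PTR(err);
 *		}
 *		sb->s_flags |= MS_ACTIVE;
 *	}
 *	return dget(sb->s_root);
 */
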
484 void drop_super(struct super_block *sb)
485 {
486         up_read(&sb->s_umount);
487         put_super(sb);
488 }
489 
490 EXPORT_SYMBOL(drop_super);
491 
492 /**
493  *      iterate_supers - call function for all active superblocks
494  *      @f: function to call
495  *      @arg: argument to pass to it
496  *
 497  *      Scans the superblock list and calls the given function, passing it
 498  *      a locked superblock and the given argument.
499  */
500 void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
501 {
502         struct super_block *sb, *p = NULL;
503 
504         spin_lock(&sb_lock);
505         list_for_each_entry(sb, &super_blocks, s_list) {
506                 if (hlist_unhashed(&sb->s_instances))
507                         continue;
508                 sb->s_count++;
509                 spin_unlock(&sb_lock);
510 
511                 down_read(&sb->s_umount);
512                 if (sb->s_root && (sb->s_flags & MS_BORN))
513                         f(sb, arg);
514                 up_read(&sb->s_umount);
515 
516                 spin_lock(&sb_lock);
517                 if (p)
518                         __put_super(p);
519                 p = sb;
520         }
521         if (p)
522                 __put_super(p);
523         spin_unlock(&sb_lock);
524 }
525 
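/*
 * Example (editor's sketch, not part of this file): a callback suitable for
 * iterate_supers().  It is invoked with sb->s_umount held for reading, so it
 * may safely call sync_filesystem().  The name sync_one_sb_example is
 * hypothetical; the helpers in fs/sync.c look much like this.
 */
static void sync_one_sb_example(struct super_block *sb, void *arg)
{
	/* skip read-only superblocks, there is nothing to write back */
	if (!(sb->s_flags & MS_RDONLY))
		sync_filesystem(sb);
}

/* ...and a caller would simply do:  iterate_supers(sync_one_sb_example, NULL); */
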
526 /**
527  *      iterate_supers_type - call function for superblocks of given type
528  *      @type: fs type
529  *      @f: function to call
530  *      @arg: argument to pass to it
531  *
 532  *      Scans the superblock list and calls the given function, passing it
 533  *      a locked superblock and the given argument.
534  */
535 void iterate_supers_type(struct file_system_type *type,
536         void (*f)(struct super_block *, void *), void *arg)
537 {
538         struct super_block *sb, *p = NULL;
539 
540         spin_lock(&sb_lock);
541         hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
542                 sb->s_count++;
543                 spin_unlock(&sb_lock);
544 
545                 down_read(&sb->s_umount);
546                 if (sb->s_root && (sb->s_flags & MS_BORN))
547                         f(sb, arg);
548                 up_read(&sb->s_umount);
549 
550                 spin_lock(&sb_lock);
551                 if (p)
552                         __put_super(p);
553                 p = sb;
554         }
555         if (p)
556                 __put_super(p);
557         spin_unlock(&sb_lock);
558 }
559 
560 EXPORT_SYMBOL(iterate_supers_type);
561 
562 /**
563  *      get_super - get the superblock of a device
564  *      @bdev: device to get the superblock for
565  *      
566  *      Scans the superblock list and finds the superblock of the file system
567  *      mounted on the device given. %NULL is returned if no match is found.
568  */
569 
570 struct super_block *get_super(struct block_device *bdev)
571 {
572         struct super_block *sb;
573 
574         if (!bdev)
575                 return NULL;
576 
577         spin_lock(&sb_lock);
578 rescan:
579         list_for_each_entry(sb, &super_blocks, s_list) {
580                 if (hlist_unhashed(&sb->s_instances))
581                         continue;
582                 if (sb->s_bdev == bdev) {
583                         sb->s_count++;
584                         spin_unlock(&sb_lock);
585                         down_read(&sb->s_umount);
586                         /* still alive? */
587                         if (sb->s_root && (sb->s_flags & MS_BORN))
588                                 return sb;
589                         up_read(&sb->s_umount);
590                         /* nope, got unmounted */
591                         spin_lock(&sb_lock);
592                         __put_super(sb);
593                         goto rescan;
594                 }
595         }
596         spin_unlock(&sb_lock);
597         return NULL;
598 }
599 
600 EXPORT_SYMBOL(get_super);
601 
602 /**
603  *      get_super_thawed - get thawed superblock of a device
604  *      @bdev: device to get the superblock for
605  *
606  *      Scans the superblock list and finds the superblock of the file system
607  *      mounted on the device. The superblock is returned once it is thawed
608  *      (or immediately if it was not frozen). %NULL is returned if no match
609  *      is found.
610  */
611 struct super_block *get_super_thawed(struct block_device *bdev)
612 {
613         while (1) {
614                 struct super_block *s = get_super(bdev);
615                 if (!s || s->s_writers.frozen == SB_UNFROZEN)
616                         return s;
617                 up_read(&s->s_umount);
618                 wait_event(s->s_writers.wait_unfrozen,
619                            s->s_writers.frozen == SB_UNFROZEN);
620                 put_super(s);
621         }
622 }
623 EXPORT_SYMBOL(get_super_thawed);
624 
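/*
 * Example (editor's sketch): quotactl(2) is a typical user of this helper.
 * It resolves the block device, waits for a possible freeze to end, and
 * releases the superblock with drop_super() when done (error handling
 * trimmed):
 *
 *	sb = get_super_thawed(bdev);
 *	if (!sb)
 *		return -ENODEV;
 *	...operate on sb...
 *	drop_super(sb);
 */
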
625 /**
626  * get_active_super - get an active reference to the superblock of a device
627  * @bdev: device to get the superblock for
628  *
629  * Scans the superblock list and finds the superblock of the file system
630  * mounted on the device given.  Returns the superblock with an active
631  * reference or %NULL if none was found.
632  */
633 struct super_block *get_active_super(struct block_device *bdev)
634 {
635         struct super_block *sb;
636 
637         if (!bdev)
638                 return NULL;
639 
640 restart:
641         spin_lock(&sb_lock);
642         list_for_each_entry(sb, &super_blocks, s_list) {
643                 if (hlist_unhashed(&sb->s_instances))
644                         continue;
645                 if (sb->s_bdev == bdev) {
646                         if (!grab_super(sb))
647                                 goto restart;
648                         up_write(&sb->s_umount);
649                         return sb;
650                 }
651         }
652         spin_unlock(&sb_lock);
653         return NULL;
654 }
655  
656 struct super_block *user_get_super(dev_t dev)
657 {
658         struct super_block *sb;
659 
660         spin_lock(&sb_lock);
661 rescan:
662         list_for_each_entry(sb, &super_blocks, s_list) {
663                 if (hlist_unhashed(&sb->s_instances))
664                         continue;
665                 if (sb->s_dev ==  dev) {
666                         sb->s_count++;
667                         spin_unlock(&sb_lock);
668                         down_read(&sb->s_umount);
669                         /* still alive? */
670                         if (sb->s_root && (sb->s_flags & MS_BORN))
671                                 return sb;
672                         up_read(&sb->s_umount);
673                         /* nope, got unmounted */
674                         spin_lock(&sb_lock);
675                         __put_super(sb);
676                         goto rescan;
677                 }
678         }
679         spin_unlock(&sb_lock);
680         return NULL;
681 }
682 
683 /**
684  *      do_remount_sb - asks filesystem to change mount options.
685  *      @sb:    superblock in question
686  *      @flags: numeric part of options
687  *      @data:  the rest of options
688  *      @force: whether or not to force the change
689  *
690  *      Alters the mount options of a mounted file system.
691  */
692 int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
693 {
694         int retval;
695         int remount_ro;
696 
697         if (sb->s_writers.frozen != SB_UNFROZEN)
698                 return -EBUSY;
699 
700 #ifdef CONFIG_BLOCK
701         if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
702                 return -EACCES;
703 #endif
704 
705         remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
706 
707         if (remount_ro) {
708                 if (sb->s_pins.first) {
709                         up_write(&sb->s_umount);
710                         sb_pin_kill(sb);
711                         down_write(&sb->s_umount);
712                         if (!sb->s_root)
713                                 return 0;
714                         if (sb->s_writers.frozen != SB_UNFROZEN)
715                                 return -EBUSY;
716                         remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
717                 }
718         }
719         shrink_dcache_sb(sb);
720 
721         /* If we are remounting RDONLY and current sb is read/write,
722            make sure there are no rw files opened */
723         if (remount_ro) {
724                 if (force) {
725                         sb->s_readonly_remount = 1;
726                         smp_wmb();
727                 } else {
728                         retval = sb_prepare_remount_readonly(sb);
729                         if (retval)
730                                 return retval;
731                 }
732         }
733 
734         if (sb->s_op->remount_fs) {
735                 retval = sb->s_op->remount_fs(sb, &flags, data);
736                 if (retval) {
737                         if (!force)
738                                 goto cancel_readonly;
739                         /* If forced remount, go ahead despite any errors */
740                         WARN(1, "forced remount of a %s fs returned %i\n",
741                              sb->s_type->name, retval);
742                 }
743         }
744         sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
745         /* Needs to be ordered wrt mnt_is_readonly() */
746         smp_wmb();
747         sb->s_readonly_remount = 0;
748 
749         /*
750          * Some filesystems modify their metadata via some other path than the
751          * bdev buffer cache (eg. use a private mapping, or directories in
752          * pagecache, etc). Also file data modifications go via their own
 753  *      mappings. So if we remount read-only and then copy the filesystem
 754  *      from the bdev, we could get stale data, so invalidate it to give a best
755          * effort at coherency.
756          */
757         if (remount_ro && sb->s_bdev)
758                 invalidate_bdev(sb->s_bdev);
759         return 0;
760 
761 cancel_readonly:
762         sb->s_readonly_remount = 0;
763         return retval;
764 }
765 
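/*
 * Example (editor's sketch): do_remount_sb() must be called with
 * sb->s_umount held for writing.  The mount(2) MS_REMOUNT path in
 * fs/namespace.c does roughly this (capability and security checks trimmed):
 *
 *	down_write(&sb->s_umount);
 *	err = do_remount_sb(sb, flags, data, 0);
 *	up_write(&sb->s_umount);
 */
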
766 static void do_emergency_remount(struct work_struct *work)
767 {
768         struct super_block *sb, *p = NULL;
769 
770         spin_lock(&sb_lock);
771         list_for_each_entry(sb, &super_blocks, s_list) {
772                 if (hlist_unhashed(&sb->s_instances))
773                         continue;
774                 sb->s_count++;
775                 spin_unlock(&sb_lock);
776                 down_write(&sb->s_umount);
777                 if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
778                     !(sb->s_flags & MS_RDONLY)) {
779                         /*
780                          * What lock protects sb->s_flags??
781                          */
782                         do_remount_sb(sb, MS_RDONLY, NULL, 1);
783                 }
784                 up_write(&sb->s_umount);
785                 spin_lock(&sb_lock);
786                 if (p)
787                         __put_super(p);
788                 p = sb;
789         }
790         if (p)
791                 __put_super(p);
792         spin_unlock(&sb_lock);
793         kfree(work);
794         printk("Emergency Remount complete\n");
795 }
796 
797 void emergency_remount(void)
798 {
799         struct work_struct *work;
800 
801         work = kmalloc(sizeof(*work), GFP_ATOMIC);
802         if (work) {
803                 INIT_WORK(work, do_emergency_remount);
804                 schedule_work(work);
805         }
806 }
807 
808 /*
809  * Unnamed block devices are dummy devices used by virtual
810  * filesystems which don't use real block-devices.  -- jrs
811  */
812 
813 static DEFINE_IDA(unnamed_dev_ida);
814 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
815 /* Many userspace utilities consider an FSID of 0 invalid.
816  * Always return at least 1 from get_anon_bdev.
817  */
818 static int unnamed_dev_start = 1;
819 
820 int get_anon_bdev(dev_t *p)
821 {
822         int dev;
823         int error;
824 
825  retry:
826         if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
827                 return -ENOMEM;
828         spin_lock(&unnamed_dev_lock);
829         error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
830         if (!error)
831                 unnamed_dev_start = dev + 1;
832         spin_unlock(&unnamed_dev_lock);
833         if (error == -EAGAIN)
834                 /* We raced and lost with another CPU. */
835                 goto retry;
836         else if (error)
837                 return -EAGAIN;
838 
839         if (dev == (1 << MINORBITS)) {
840                 spin_lock(&unnamed_dev_lock);
841                 ida_remove(&unnamed_dev_ida, dev);
842                 if (unnamed_dev_start > dev)
843                         unnamed_dev_start = dev;
844                 spin_unlock(&unnamed_dev_lock);
845                 return -EMFILE;
846         }
847         *p = MKDEV(0, dev & MINORMASK);
848         return 0;
849 }
850 EXPORT_SYMBOL(get_anon_bdev);
851 
852 void free_anon_bdev(dev_t dev)
853 {
854         int slot = MINOR(dev);
855         spin_lock(&unnamed_dev_lock);
856         ida_remove(&unnamed_dev_ida, slot);
857         if (slot < unnamed_dev_start)
858                 unnamed_dev_start = slot;
859         spin_unlock(&unnamed_dev_lock);
860 }
861 EXPORT_SYMBOL(free_anon_bdev);
862 
863 int set_anon_super(struct super_block *s, void *data)
864 {
865         int error = get_anon_bdev(&s->s_dev);
866         if (!error)
867                 s->s_bdi = &noop_backing_dev_info;
868         return error;
869 }
870 
871 EXPORT_SYMBOL(set_anon_super);
872 
873 void kill_anon_super(struct super_block *sb)
874 {
875         dev_t dev = sb->s_dev;
876         generic_shutdown_super(sb);
877         free_anon_bdev(dev);
878 }
879 
880 EXPORT_SYMBOL(kill_anon_super);
881 
882 void kill_litter_super(struct super_block *sb)
883 {
884         if (sb->s_root)
885                 d_genocide(sb->s_root);
886         kill_anon_super(sb);
887 }
888 
889 EXPORT_SYMBOL(kill_litter_super);
890 
891 static int ns_test_super(struct super_block *sb, void *data)
892 {
893         return sb->s_fs_info == data;
894 }
895 
896 static int ns_set_super(struct super_block *sb, void *data)
897 {
898         sb->s_fs_info = data;
899         return set_anon_super(sb, NULL);
900 }
901 
902 struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
903         void *data, int (*fill_super)(struct super_block *, void *, int))
904 {
905         struct super_block *sb;
906 
907         sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
908         if (IS_ERR(sb))
909                 return ERR_CAST(sb);
910 
911         if (!sb->s_root) {
912                 int err;
913                 err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
914                 if (err) {
915                         deactivate_locked_super(sb);
916                         return ERR_PTR(err);
917                 }
918 
919                 sb->s_flags |= MS_ACTIVE;
920         }
921 
922         return dget(sb->s_root);
923 }
924 
925 EXPORT_SYMBOL(mount_ns);
926 
927 #ifdef CONFIG_BLOCK
928 static int set_bdev_super(struct super_block *s, void *data)
929 {
930         s->s_bdev = data;
931         s->s_dev = s->s_bdev->bd_dev;
932 
933         /*
934          * We set the bdi here to the queue backing, file systems can
935          * overwrite this in ->fill_super()
936          */
937         s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
938         return 0;
939 }
940 
941 static int test_bdev_super(struct super_block *s, void *data)
942 {
943         return (void *)s->s_bdev == data;
944 }
945 
946 struct dentry *mount_bdev(struct file_system_type *fs_type,
947         int flags, const char *dev_name, void *data,
948         int (*fill_super)(struct super_block *, void *, int))
949 {
950         struct block_device *bdev;
951         struct super_block *s;
952         fmode_t mode = FMODE_READ | FMODE_EXCL;
953         int error = 0;
954 
955         if (!(flags & MS_RDONLY))
956                 mode |= FMODE_WRITE;
957 
958         bdev = blkdev_get_by_path(dev_name, mode, fs_type);
959         if (IS_ERR(bdev))
960                 return ERR_CAST(bdev);
961 
962         /*
963          * once the super is inserted into the list by sget, s_umount
964          * will protect the lockfs code from trying to start a snapshot
965          * while we are mounting
966          */
967         mutex_lock(&bdev->bd_fsfreeze_mutex);
968         if (bdev->bd_fsfreeze_count > 0) {
969                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
970                 error = -EBUSY;
971                 goto error_bdev;
972         }
973         s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
974                  bdev);
975         mutex_unlock(&bdev->bd_fsfreeze_mutex);
976         if (IS_ERR(s))
977                 goto error_s;
978 
979         if (s->s_root) {
980                 if ((flags ^ s->s_flags) & MS_RDONLY) {
981                         deactivate_locked_super(s);
982                         error = -EBUSY;
983                         goto error_bdev;
984                 }
985 
986                 /*
987                  * s_umount nests inside bd_mutex during
988                  * __invalidate_device().  blkdev_put() acquires
989                  * bd_mutex and can't be called under s_umount.  Drop
990                  * s_umount temporarily.  This is safe as we're
991                  * holding an active reference.
992                  */
993                 up_write(&s->s_umount);
994                 blkdev_put(bdev, mode);
995                 down_write(&s->s_umount);
996         } else {
997                 char b[BDEVNAME_SIZE];
998 
999                 s->s_mode = mode;
1000                 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
1001                 sb_set_blocksize(s, block_size(bdev));
1002                 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1003                 if (error) {
1004                         deactivate_locked_super(s);
1005                         goto error;
1006                 }
1007 
1008                 s->s_flags |= MS_ACTIVE;
1009                 bdev->bd_super = s;
1010         }
1011 
1012         return dget(s->s_root);
1013 
1014 error_s:
1015         error = PTR_ERR(s);
1016 error_bdev:
1017         blkdev_put(bdev, mode);
1018 error:
1019         return ERR_PTR(error);
1020 }
1021 EXPORT_SYMBOL(mount_bdev);
1022 
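/*
 * Example (editor's sketch, not part of fs/super.c): how a block-device-backed
 * filesystem typically wires mount_bdev() and kill_block_super() (defined just
 * below) into its file_system_type.  All examplefs_* names are hypothetical.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
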
1023 void kill_block_super(struct super_block *sb)
1024 {
1025         struct block_device *bdev = sb->s_bdev;
1026         fmode_t mode = sb->s_mode;
1027 
1028         bdev->bd_super = NULL;
1029         generic_shutdown_super(sb);
1030         sync_blockdev(bdev);
1031         WARN_ON_ONCE(!(mode & FMODE_EXCL));
1032         blkdev_put(bdev, mode | FMODE_EXCL);
1033 }
1034 
1035 EXPORT_SYMBOL(kill_block_super);
1036 #endif
1037 
1038 struct dentry *mount_nodev(struct file_system_type *fs_type,
1039         int flags, void *data,
1040         int (*fill_super)(struct super_block *, void *, int))
1041 {
1042         int error;
1043         struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1044 
1045         if (IS_ERR(s))
1046                 return ERR_CAST(s);
1047 
1048         error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1049         if (error) {
1050                 deactivate_locked_super(s);
1051                 return ERR_PTR(error);
1052         }
1053         s->s_flags |= MS_ACTIVE;
1054         return dget(s->s_root);
1055 }
1056 EXPORT_SYMBOL(mount_nodev);
1057 
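/*
 * Example (editor's sketch): a memory-backed filesystem with no real device,
 * ramfs-style, just forwards to mount_nodev() from its ->mount() and pairs it
 * with kill_litter_super() or kill_anon_super() in ->kill_sb:
 *
 *	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
 *
 * examplefs_fill_super is hypothetical.
 */
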
1058 static int compare_single(struct super_block *s, void *p)
1059 {
1060         return 1;
1061 }
1062 
1063 struct dentry *mount_single(struct file_system_type *fs_type,
1064         int flags, void *data,
1065         int (*fill_super)(struct super_block *, void *, int))
1066 {
1067         struct super_block *s;
1068         int error;
1069 
1070         s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1071         if (IS_ERR(s))
1072                 return ERR_CAST(s);
1073         if (!s->s_root) {
1074                 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1075                 if (error) {
1076                         deactivate_locked_super(s);
1077                         return ERR_PTR(error);
1078                 }
1079                 s->s_flags |= MS_ACTIVE;
1080         } else {
1081                 do_remount_sb(s, flags, data, 0);
1082         }
1083         return dget(s->s_root);
1084 }
1085 EXPORT_SYMBOL(mount_single);
1086 
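/*
 * Example (editor's sketch): single-instance filesystems such as debugfs use
 *
 *	return mount_single(fs_type, flags, data, examplefs_fill_super);
 *
 * so that every mount returns the same superblock; a later mount only
 * remounts the existing instance via do_remount_sb() above.
 * examplefs_fill_super is hypothetical.
 */
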
1087 struct dentry *
1088 mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
1089 {
1090         struct dentry *root;
1091         struct super_block *sb;
1092         char *secdata = NULL;
1093         int error = -ENOMEM;
1094 
1095         if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
1096                 secdata = alloc_secdata();
1097                 if (!secdata)
1098                         goto out;
1099 
1100                 error = security_sb_copy_data(data, secdata);
1101                 if (error)
1102                         goto out_free_secdata;
1103         }
1104 
1105         root = type->mount(type, flags, name, data);
1106         if (IS_ERR(root)) {
1107                 error = PTR_ERR(root);
1108                 goto out_free_secdata;
1109         }
1110         sb = root->d_sb;
1111         BUG_ON(!sb);
1112         WARN_ON(!sb->s_bdi);
1113         WARN_ON(sb->s_bdi == &default_backing_dev_info);
1114         sb->s_flags |= MS_BORN;
1115 
1116         error = security_sb_kern_mount(sb, flags, secdata);
1117         if (error)
1118                 goto out_sb;
1119 
1120         /*
1121          * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1122          * but s_maxbytes was an unsigned long long for many releases. Throw
1123          * this warning for a little while to try and catch filesystems that
1124          * violate this rule.
1125          */
1126         WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1127                 "negative value (%lld)\n", type->name, sb->s_maxbytes);
1128 
1129         up_write(&sb->s_umount);
1130         free_secdata(secdata);
1131         return root;
1132 out_sb:
1133         dput(root);
1134         deactivate_locked_super(sb);
1135 out_free_secdata:
1136         free_secdata(secdata);
1137 out:
1138         return ERR_PTR(error);
1139 }
1140 
1141 /*
1142  * This is an internal function, please use sb_end_{write,pagefault,intwrite}
1143  * instead.
1144  */
1145 void __sb_end_write(struct super_block *sb, int level)
1146 {
1147         percpu_counter_dec(&sb->s_writers.counter[level-1]);
1148         /*
1149          * Make sure s_writers are updated before we wake up waiters in
1150          * freeze_super().
1151          */
1152         smp_mb();
1153         if (waitqueue_active(&sb->s_writers.wait))
1154                 wake_up(&sb->s_writers.wait);
1155         rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
1156 }
1157 EXPORT_SYMBOL(__sb_end_write);
1158 
1159 #ifdef CONFIG_LOCKDEP
1160 /*
1161  * We want lockdep to tell us about possible deadlocks with freezing but
 1162  * it's a bit tricky to properly instrument it. Getting freeze protection
 1163  * works like taking a read lock, but there are subtle problems. XFS for example
1164  * gets freeze protection on internal level twice in some cases, which is OK
1165  * only because we already hold a freeze protection also on higher level. Due
1166  * to these cases we have to tell lockdep we are doing trylock when we
1167  * already hold a freeze protection for a higher freeze level.
1168  */
1169 static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
1170                                 unsigned long ip)
1171 {
1172         int i;
1173 
1174         if (!trylock) {
1175                 for (i = 0; i < level - 1; i++)
1176                         if (lock_is_held(&sb->s_writers.lock_map[i])) {
1177                                 trylock = true;
1178                                 break;
1179                         }
1180         }
1181         rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
1182 }
1183 #endif
1184 
1185 /*
1186  * This is an internal function, please use sb_start_{write,pagefault,intwrite}
1187  * instead.
1188  */
1189 int __sb_start_write(struct super_block *sb, int level, bool wait)
1190 {
1191 retry:
1192         if (unlikely(sb->s_writers.frozen >= level)) {
1193                 if (!wait)
1194                         return 0;
1195                 wait_event(sb->s_writers.wait_unfrozen,
1196                            sb->s_writers.frozen < level);
1197         }
1198 
1199 #ifdef CONFIG_LOCKDEP
1200         acquire_freeze_lock(sb, level, !wait, _RET_IP_);
1201 #endif
1202         percpu_counter_inc(&sb->s_writers.counter[level-1]);
1203         /*
1204          * Make sure counter is updated before we check for frozen.
1205          * freeze_super() first sets frozen and then checks the counter.
1206          */
1207         smp_mb();
1208         if (unlikely(sb->s_writers.frozen >= level)) {
1209                 __sb_end_write(sb, level);
1210                 goto retry;
1211         }
1212         return 1;
1213 }
1214 EXPORT_SYMBOL(__sb_start_write);
1215 
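/*
 * Example (editor's sketch): filesystem write paths use the wrappers from
 * include/linux/fs.h rather than these internal helpers, bracketing the code
 * that dirties the filesystem:
 *
 *	sb_start_write(inode->i_sb);
 *	...modify the filesystem...
 *	sb_end_write(inode->i_sb);
 *
 * sb_start_write() expands to __sb_start_write(sb, SB_FREEZE_WRITE, true) and
 * sb_end_write() to __sb_end_write(sb, SB_FREEZE_WRITE), so freeze_super()
 * below can wait for all such writers at the SB_FREEZE_WRITE level.
 */
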
1216 /**
1217  * sb_wait_write - wait until all writers to given file system finish
1218  * @sb: the super for which we wait
1219  * @level: type of writers we wait for (normal vs page fault)
1220  *
1221  * This function waits until there are no writers of given type to given file
1222  * system. Caller of this function should make sure there can be no new writers
1223  * of type @level before calling this function. Otherwise this function can
1224  * livelock.
1225  */
1226 static void sb_wait_write(struct super_block *sb, int level)
1227 {
1228         s64 writers;
1229 
1230         /*
 1231          * We just cycle through lockdep here so that it does not complain
 1232          * about returning to userspace with the lock held.
1233          */
1234         rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
1235         rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);
1236 
1237         do {
1238                 DEFINE_WAIT(wait);
1239 
1240                 /*
1241                  * We use a barrier in prepare_to_wait() to separate setting
1242                  * of frozen and checking of the counter
1243                  */
1244                 prepare_to_wait(&sb->s_writers.wait, &wait,
1245                                 TASK_UNINTERRUPTIBLE);
1246 
1247                 writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
1248                 if (writers)
1249                         schedule();
1250 
1251                 finish_wait(&sb->s_writers.wait, &wait);
1252         } while (writers);
1253 }
1254 
1255 /**
1256  * freeze_super - lock the filesystem and force it into a consistent state
1257  * @sb: the super to lock
1258  *
1259  * Syncs the super to make sure the filesystem is consistent and calls the fs's
1260  * freeze_fs.  Subsequent calls to this without first thawing the fs will return
1261  * -EBUSY.
1262  *
1263  * During this function, sb->s_writers.frozen goes through these values:
1264  *
1265  * SB_UNFROZEN: File system is normal, all writes progress as usual.
1266  *
1267  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
1268  * writes should be blocked, though page faults are still allowed. We wait for
1269  * all writes to complete and then proceed to the next stage.
1270  *
1271  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1272  * but internal fs threads can still modify the filesystem (although they
1273  * should not dirty new pages or inodes), writeback can run etc. After waiting
1274  * for all running page faults we sync the filesystem which will clean all
1275  * dirty pages and inodes (no new dirty pages or inodes can be created when
1276  * sync is running).
1277  *
1278  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1279  * modification are blocked (e.g. XFS preallocation truncation on inode
1280  * reclaim). This is usually implemented by blocking new transactions for
1281  * filesystems that have them and need this additional guard. After all
1282  * internal writers are finished we call ->freeze_fs() to finish filesystem
1283  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1284  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1285  *
1286  * sb->s_writers.frozen is protected by sb->s_umount.
1287  */
1288 int freeze_super(struct super_block *sb)
1289 {
1290         int ret;
1291 
1292         atomic_inc(&sb->s_active);
1293         down_write(&sb->s_umount);
1294         if (sb->s_writers.frozen != SB_UNFROZEN) {
1295                 deactivate_locked_super(sb);
1296                 return -EBUSY;
1297         }
1298 
1299         if (!(sb->s_flags & MS_BORN)) {
1300                 up_write(&sb->s_umount);
1301                 return 0;       /* sic - it's "nothing to do" */
1302         }
1303 
1304         if (sb->s_flags & MS_RDONLY) {
1305                 /* Nothing to do really... */
1306                 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1307                 up_write(&sb->s_umount);
1308                 return 0;
1309         }
1310 
1311         /* From now on, no new normal writers can start */
1312         sb->s_writers.frozen = SB_FREEZE_WRITE;
1313         smp_wmb();
1314 
1315         /* Release s_umount to preserve sb_start_write -> s_umount ordering */
1316         up_write(&sb->s_umount);
1317 
1318         sb_wait_write(sb, SB_FREEZE_WRITE);
1319 
1320         /* Now we go and block page faults... */
1321         down_write(&sb->s_umount);
1322         sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1323         smp_wmb();
1324 
1325         sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
1326 
1327         /* All writers are done so after syncing there won't be dirty data */
1328         sync_filesystem(sb);
1329 
1330         /* Now wait for internal filesystem counter */
1331         sb->s_writers.frozen = SB_FREEZE_FS;
1332         smp_wmb();
1333         sb_wait_write(sb, SB_FREEZE_FS);
1334 
1335         if (sb->s_op->freeze_fs) {
1336                 ret = sb->s_op->freeze_fs(sb);
1337                 if (ret) {
1338                         printk(KERN_ERR
1339                                 "VFS: Filesystem freeze failed\n");
1340                         sb->s_writers.frozen = SB_UNFROZEN;
1341                         smp_wmb();
1342                         wake_up(&sb->s_writers.wait_unfrozen);
1343                         deactivate_locked_super(sb);
1344                         return ret;
1345                 }
1346         }
1347         /*
1348          * This is just for debugging purposes so that fs can warn if it
1349          * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
1350          */
1351         sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1352         up_write(&sb->s_umount);
1353         return 0;
1354 }
1355 EXPORT_SYMBOL(freeze_super);
1356 
1357 /**
1358  * thaw_super -- unlock filesystem
1359  * @sb: the super to thaw
1360  *
1361  * Unlocks the filesystem and marks it writeable again after freeze_super().
1362  */
1363 int thaw_super(struct super_block *sb)
1364 {
1365         int error;
1366 
1367         down_write(&sb->s_umount);
1368         if (sb->s_writers.frozen == SB_UNFROZEN) {
1369                 up_write(&sb->s_umount);
1370                 return -EINVAL;
1371         }
1372 
1373         if (sb->s_flags & MS_RDONLY)
1374                 goto out;
1375 
1376         if (sb->s_op->unfreeze_fs) {
1377                 error = sb->s_op->unfreeze_fs(sb);
1378                 if (error) {
1379                         printk(KERN_ERR
1380                                 "VFS: Filesystem thaw failed\n");
1381                         up_write(&sb->s_umount);
1382                         return error;
1383                 }
1384         }
1385 
1386 out:
1387         sb->s_writers.frozen = SB_UNFROZEN;
1388         smp_wmb();
1389         wake_up(&sb->s_writers.wait_unfrozen);
1390         deactivate_locked_super(sb);
1391 
1392         return 0;
1393 }
1394 EXPORT_SYMBOL(thaw_super);
1395 
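/*
 * Example (editor's sketch): the FIFREEZE and FITHAW ioctls in fs/ioctl.c are
 * the usual way this pair is driven from userspace (permission checks and
 * error handling trimmed):
 *
 *	struct super_block *sb = file_inode(filp)->i_sb;
 *
 *	error = freeze_super(sb);	(FIFREEZE)
 *	...take a device-level snapshot, back up the volume, etc...
 *	error = thaw_super(sb);		(FITHAW)
 */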
