linux/fs/super.c
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to prevent this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long    fs_objects = 0;
        long    total_objects;
        long    freed = 0;
        long    dentries;
        long    inodes;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance.  We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends.
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!grab_super_passive(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

        inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
        dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
        total_objects = dentries + inodes + fs_objects + 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         */
        freed = prune_dcache_sb(sb, dentries, sc->nid);
        freed += prune_icache_sb(sb, inodes, sc->nid);

        if (fs_objects) {
                fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
                                                                total_objects);
                freed += sb->s_op->free_cached_objects(sb, fs_objects,
                                                       sc->nid);
        }

        drop_super(sb);
        return freed;
}
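
/*
 * Worked example of the proportional split above (illustrative numbers
 * only, not from a real workload): with sc->nr_to_scan = 128,
 * dentries = 600, inodes = 300 and fs_objects = 100, total_objects is
 * 600 + 300 + 100 + 1 = 1001, and the scan is divided as
 *
 *      dentries   = mult_frac(128, 600, 1001) = 76
 *      inodes     = mult_frac(128, 300, 1001) = 38
 *      fs_objects = mult_frac(128, 100, 1001) = 12
 *
 * so each cache is asked to give up objects in proportion to its share.
 * The "+ 1" in total_objects keeps the divisor non-zero when all three
 * counts are zero.
 */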

static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long    total_objects = 0;

        sb = container_of(shrink, struct super_block, s_shrink);

        if (!grab_super_passive(sb))
                return 0;

        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb,
                                                 sc->nid);

        total_objects += list_lru_count_node(&sb->s_dentry_lru,
                                                 sc->nid);
        total_objects += list_lru_count_node(&sb->s_inode_lru,
                                                 sc->nid);

        total_objects = vfs_pressure_ratio(total_objects);
        drop_super(sb);
        return total_objects;
}

static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
        int err;
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                err = percpu_counter_init(&s->s_writers.counter[i], 0);
                if (err < 0)
                        goto err_out;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        return 0;
err_out:
        while (--i >= 0)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        return err;
}

static void destroy_sb_writers(struct super_block *s)
{
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
}

/**
 *      alloc_super     -       create new superblock
 *      @type:  filesystem type superblock should belong to
 *      @flags: the mount flags
 *
 *      Allocates and initializes a new &struct super_block.  alloc_super()
 *      returns a pointer to the new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
        static const struct super_operations default_op;

        if (s) {
                if (security_sb_alloc(s))
                        goto out_free_sb;

#ifdef CONFIG_SMP
                s->s_files = alloc_percpu(struct list_head);
                if (!s->s_files)
                        goto err_out;
                else {
                        int i;

                        for_each_possible_cpu(i)
                                INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
                }
#else
                INIT_LIST_HEAD(&s->s_files);
#endif
                if (init_sb_writers(s, type))
                        goto err_out;
                s->s_flags = flags;
                s->s_bdi = &default_backing_dev_info;
                INIT_HLIST_NODE(&s->s_instances);
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);

                if (list_lru_init(&s->s_dentry_lru))
                        goto err_out;
                if (list_lru_init(&s->s_inode_lru))
                        goto err_out_dentry_lru;

                INIT_LIST_HEAD(&s->s_mounts);
                init_rwsem(&s->s_umount);
                lockdep_set_class(&s->s_umount, &type->s_umount_key);
                /*
                 * sget() can have s_umount recursion.
                 *
                 * When it cannot find a suitable sb, it allocates a new
                 * one (this one), and tries again to find a suitable old
                 * one.
                 *
                 * In case that succeeds, it will acquire the s_umount
                 * lock of the old one. Since these are clearly distinct
                 * locks, and this object isn't exposed yet, there's no
                 * risk of deadlocks.
                 *
                 * Annotate this by putting this lock in a different
                 * subclass.
                 */
                down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
                s->s_count = 1;
                atomic_set(&s->s_active, 1);
                mutex_init(&s->s_vfs_rename_mutex);
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
                init_rwsem(&s->s_dquot.dqptr_sem);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
                s->s_time_gran = 1000000000;
                s->cleancache_poolid = -1;

                s->s_shrink.seeks = DEFAULT_SEEKS;
                s->s_shrink.scan_objects = super_cache_scan;
                s->s_shrink.count_objects = super_cache_count;
                s->s_shrink.batch = 1024;
                s->s_shrink.flags = SHRINKER_NUMA_AWARE;
        }
out:
        return s;

err_out_dentry_lru:
        list_lru_destroy(&s->s_dentry_lru);
err_out:
        security_sb_free(s);
#ifdef CONFIG_SMP
        if (s->s_files)
                free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
out_free_sb:
        kfree(s);
        s = NULL;
        goto out;
}

/**
 *      destroy_super   -       frees a superblock
 *      @s: superblock to free
 *
 *      Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
#ifdef CONFIG_SMP
        free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree(s);
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 *      put_super       -       drop a temporary reference to superblock
 *      @sb: superblock in question
 *
 *      Drops a temporary reference, freeing the superblock if no
 *      references are left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}


/**
 *      deactivate_locked_super -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Drops an active reference to the superblock, converting it into a
 *      temporary one if there are no other active references left.  In that
 *      case we tell the fs driver to shut it down and drop the temporary
 *      reference we had just acquired.
 *
 *      Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                fs->kill_sb(s);

                /* caches are now gone, so we can safely kill the shrinker */
                unregister_shrinker(&s->s_shrink);

                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *      deactivate_super        -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Variant of deactivate_locked_super(), except that the superblock is
 *      *not* locked by the caller.  If we are going to drop the final active
 *      reference, the lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 *      grab_super - acquire an active reference
 *      @s: reference we are trying to make active
 *
 *      Tries to acquire an active reference.  grab_super() is used when we
 *      have just found a superblock in super_blocks or fs_type->fs_supers
 *      and want to turn it into a full-blown active reference.  grab_super()
 *      is called with sb_lock held and drops it.  Returns 1 in case of
 *      success, 0 if we failed (the superblock was already dead or dying
 *      when grab_super() was called).  Note that this is only called for
 *      superblocks not in rundown mode (== ones still on ->fs_supers
 *      of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 *      grab_super_passive - acquire a passive reference
 *      @sb: reference we are trying to grab
 *
 *      Tries to acquire a passive reference. This is used in places where we
 *      cannot take an active reference but we need to ensure that the
 *      superblock does not go away while we are working on it. It returns
 *      false if a reference was not gained, and returns true with the s_umount
 *      lock held in read mode if a reference is gained. On successful return,
 *      the caller must drop the s_umount lock and the passive reference when
 *      done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}
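
/*
 * Typical calling pattern (an illustrative sketch, mirroring what the
 * shrinker callbacks above do): take a passive reference before touching
 * a superblock found on a global list, and use drop_super() to release
 * both the read-held s_umount and the reference:
 *
 *      if (!grab_super_passive(sb))
 *              return SHRINK_STOP;     (sb is dead or being unmounted)
 *      ... work with s_umount held for read ...
 *      drop_super(sb);
 */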

/**
 *      generic_shutdown_super  -       common helper for ->kill_sb()
 *      @sb: superblock to kill
 *
 *      generic_shutdown_super() does all fs-independent work on superblock
 *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *      that need destruction out of superblock, call generic_shutdown_super()
 *      and release aforementioned objects.  Note: dentries and inodes _are_
 *      taken care of and do not need specific handling.
 *
 *      Upon calling this function, the filesystem may no longer alter or
 *      rearrange the set of dentries belonging to this super_block, nor may it
 *      change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
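
/*
 * Example of the ->kill_sb() convention described above (an illustrative
 * sketch; "myfs" and myfs_info are hypothetical, not part of this file):
 * grab any fs-private state first, let the generic helper tear down
 * dentries and inodes, then free the private objects:
 *
 *      static void myfs_kill_sb(struct super_block *sb)
 *      {
 *              struct myfs_info *info = sb->s_fs_info;
 *
 *              generic_shutdown_super(sb);
 *              kfree(info);            (safe: dentries/inodes are gone)
 *      }
 */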

/**
 *      sget    -       find or create a superblock
 *      @type:  filesystem type superblock should belong to
 *      @test:  comparison callback
 *      @set:   setup callback
 *      @flags: mount flags
 *      @data:  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags,
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}

EXPORT_SYMBOL(sget);
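
/*
 * Typical use of sget() (an illustrative sketch; the myfs_* callbacks are
 * hypothetical): a ->mount() method passes test/set callbacks so that an
 * existing superblock for the same backing store is reused:
 *
 *      s = sget(fs_type, myfs_test_super, myfs_set_super, flags, data);
 *      if (IS_ERR(s))
 *              return ERR_CAST(s);
 *      if (!s->s_root) {
 *              ... fresh sb: call fill_super(), then set MS_ACTIVE ...
 *      }
 *      return dget(s->s_root);
 *
 * mount_bdev(), mount_nodev() and mount_single() below are the common
 * wrappers around exactly this pattern.
 */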

void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *      iterate_supers - call function for all active superblocks
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls the given function, passing it
 *      the locked superblock and the given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
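
/*
 * Example (an illustrative sketch of how fs/sync.c uses this helper):
 * sync every writable filesystem by passing a callback that runs with
 * the superblock locked:
 *
 *      static void sync_one_sb(struct super_block *sb, void *arg)
 *      {
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      sync_filesystem(sb);
 *      }
 *
 *      iterate_supers(sync_one_sb, NULL);
 *
 * The callback runs with s_umount held for read; it must not take an
 * active reference to or unmount the superblock.
 */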

/**
 *      iterate_supers_type - call function for superblocks of given type
 *      @type: fs type
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls the given function, passing it
 *      the locked superblock and the given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *      get_super - get the superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

EXPORT_SYMBOL(get_super);
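
/*
 * Example (an illustrative sketch mirroring fsync_bdev() in
 * fs/block_dev.c): sync whatever filesystem is mounted on a block
 * device, falling back to syncing the bare blockdev:
 *
 *      struct super_block *sb = get_super(bdev);
 *      if (sb) {
 *              int res = sync_filesystem(sb);
 *              drop_super(sb);
 *              return res;
 *      }
 *      return sync_blockdev(bdev);
 */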

/**
 *      get_super_thawed - get thawed superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device. The superblock is returned once it is thawed
 *      (or immediately if it was not frozen). %NULL is returned if no match
 *      is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 *      do_remount_sb - asks the filesystem to change mount options.
 *      @sb:    superblock in question
 *      @flags: numeric part of options
 *      @data:  the rest of options
 *      @force: whether or not to force the change
 *
 *      Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        if (flags & MS_RDONLY)
                acct_auto_close(sb);
        shrink_dcache_sb(sb);
        sync_filesystem(sb);

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        mark_files_ro(sb);
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we remount read-only and then read the filesystem
         * back from the bdev, we could get stale data, so invalidate it to
         * give a best effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock); /* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

 retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
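
/*
 * Typical use (an illustrative sketch; the myfs_* names are hypothetical):
 * a block-device based filesystem wires mount_bdev() into its
 * file_system_type and only supplies a fill_super callback:
 *
 *      static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *              int flags, const char *dev_name, void *data)
 *      {
 *              return mount_bdev(fs_type, flags, dev_name, data,
 *                                myfs_fill_super);
 *      }
 *
 *      static struct file_system_type myfs_fs_type = {
 *              .owner          = THIS_MODULE,
 *              .name           = "myfs",
 *              .mount          = myfs_mount,
 *              .kill_sb        = kill_block_super,
 *              .fs_flags       = FS_REQUIRES_DEV,
 *      };
 */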

void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
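
/*
 * Example (an illustrative sketch; myfs_fill_super is hypothetical):
 * virtual filesystems pick the wrapper matching the lifetime they want.
 * A ramfs-like filesystem that needs a fresh instance per mount uses
 *
 *      return mount_nodev(fs_type, flags, data, myfs_fill_super);
 *
 * while a debugfs-like filesystem that shares one instance across all
 * mounts uses
 *
 *      return mount_single(fs_type, flags, data, myfs_fill_super);
 *
 * In both cases ->kill_sb() is typically kill_anon_super() or
 * kill_litter_super().
 */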

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting freeze protection
 * works like taking a read lock but there are subtle problems. XFS for
 * example gets freeze protection at the internal level twice in some cases,
 * which is OK only because we already hold freeze protection at a higher
 * level as well. Due to these cases we have to tell lockdep we are doing a
 * trylock when we already hold freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
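
/*
 * Example (an illustrative sketch): write paths take freeze protection
 * around modifications via the wrappers in include/linux/fs.h that sit
 * on top of __sb_start_write()/__sb_end_write():
 *
 *      sb_start_write(inode->i_sb);    (blocks while a freeze is underway)
 *      ... modify the filesystem ...
 *      sb_end_write(inode->i_sb);
 *
 * Page-fault and internal-fs paths use sb_start_pagefault() and
 * sb_start_intwrite() respectively, matching the three freeze levels.
 */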

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle through lockdep here so that it does not complain
         * about returning to userspace with the lock held.
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS: Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
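
/*
 * Example (an illustrative sketch): freeze_super()/thaw_super() back the
 * FIFREEZE/FITHAW ioctls in fs/ioctl.c, e.g. to quiesce a filesystem
 * before a block-level snapshot:
 *
 *      err = freeze_super(sb);         (sync + block all three levels)
 *      if (err)
 *              return err;
 *      ... take the snapshot while the fs is quiescent ...
 *      err = thaw_super(sb);           (unblock writers, wake waiters)
 */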

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);