linux/fs/super.c
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to prevent this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
        struct super_block *sb;
        int     fs_objects = 0;
        int     total_objects;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance.  We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;

        if (!grab_super_passive(sb))
                return -1;

        if (sb->s_op && sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb);

        total_objects = sb->s_nr_dentry_unused +
                        sb->s_nr_inodes_unused + fs_objects + 1;

        if (sc->nr_to_scan) {
                int     dentries;
                int     inodes;

                /* proportion the scan between the caches */
                dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
                                                        total_objects;
                inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
                                                        total_objects;
                if (fs_objects)
                        fs_objects = (sc->nr_to_scan * fs_objects) /
                                                        total_objects;
                /*
                 * prune the dcache first as the icache is pinned by it, then
                 * prune the icache, followed by the filesystem specific caches
                 */
                prune_dcache_sb(sb, dentries);
                prune_icache_sb(sb, inodes);

                if (fs_objects && sb->s_op->free_cached_objects) {
                        sb->s_op->free_cached_objects(sb, fs_objects);
                        fs_objects = sb->s_op->nr_cached_objects(sb);
                }
                total_objects = sb->s_nr_dentry_unused +
                                sb->s_nr_inodes_unused + fs_objects;
        }

        total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
        drop_super(sb);
        return total_objects;
}
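
/*
 * Worked example of the proportional split above (numbers are purely
 * illustrative): with sc->nr_to_scan = 128, 600 unused dentries, 300
 * unused inodes and 100 fs-private objects, total_objects is
 * 600 + 300 + 100 + 1 = 1001, so we scan 128*600/1001 = 76 dentries,
 * 128*300/1001 = 38 inodes and 128*100/1001 = 12 fs objects, i.e.
 * each cache is shrunk roughly in proportion to its share of the
 * reclaimable objects.  The +1 merely keeps the divisor non-zero.
 */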

static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
        int err;
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                err = percpu_counter_init(&s->s_writers.counter[i], 0);
                if (err < 0)
                        goto err_out;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        return 0;
err_out:
        while (--i >= 0)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        return err;
}

static void destroy_sb_writers(struct super_block *s)
{
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
}

/**
 *      alloc_super     -       create new superblock
 *      @type:  filesystem type superblock should belong to
 *      @flags: the mount flags
 *
 *      Allocates and initializes a new &struct super_block.  alloc_super()
 *      returns a pointer to a new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;

        if (s) {
                if (security_sb_alloc(s)) {
                        /*
                         * We cannot call security_sb_free() without
                         * security_sb_alloc() succeeding. So bail out manually
                         */
                        kfree(s);
                        s = NULL;
                        goto out;
                }
#ifdef CONFIG_SMP
                s->s_files = alloc_percpu(struct list_head);
                if (!s->s_files)
                        goto err_out;
                else {
                        int i;

                        for_each_possible_cpu(i)
                                INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
                }
#else
                INIT_LIST_HEAD(&s->s_files);
#endif
                if (init_sb_writers(s, type))
                        goto err_out;
                s->s_flags = flags;
                s->s_bdi = &default_backing_dev_info;
                INIT_HLIST_NODE(&s->s_instances);
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
                INIT_LIST_HEAD(&s->s_dentry_lru);
                INIT_LIST_HEAD(&s->s_inode_lru);
                spin_lock_init(&s->s_inode_lru_lock);
                INIT_LIST_HEAD(&s->s_mounts);
                init_rwsem(&s->s_umount);
                mutex_init(&s->s_lock);
                lockdep_set_class(&s->s_umount, &type->s_umount_key);
                /*
                 * The locking rules for s_lock are up to the
                 * filesystem. For example ext3fs has different
                 * lock ordering than usbfs:
                 */
                lockdep_set_class(&s->s_lock, &type->s_lock_key);
                /*
                 * sget() can have s_umount recursion.
                 *
                 * When it cannot find a suitable sb, it allocates a new
                 * one (this one), and tries again to find a suitable old
                 * one.
                 *
                 * If that succeeds, it will acquire the s_umount lock of
                 * the old one. Since these are clearly distinct locks, and
                 * this object isn't exposed yet, there's no risk of
                 * deadlocks.
                 *
                 * Annotate this by putting this lock in a different
                 * subclass.
                 */
                down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
                s->s_count = 1;
                atomic_set(&s->s_active, 1);
                mutex_init(&s->s_vfs_rename_mutex);
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
                init_rwsem(&s->s_dquot.dqptr_sem);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
                s->s_time_gran = 1000000000;
                s->cleancache_poolid = -1;

                s->s_shrink.seeks = DEFAULT_SEEKS;
                s->s_shrink.shrink = prune_super;
                s->s_shrink.batch = 1024;
        }
out:
        return s;
err_out:
        security_sb_free(s);
#ifdef CONFIG_SMP
        if (s->s_files)
                free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
        kfree(s);
        s = NULL;
        goto out;
}

/**
 *      destroy_super   -       frees a superblock
 *      @s: superblock to free
 *
 *      Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
        free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree(s);
}

/* Superblock refcounting */

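/*
 * A superblock carries two reference counts.  s_active counts active
 * references (mounts and the like): while it is non-zero the fs stays
 * usable, and when it hits zero ->kill_sb() shuts the filesystem down.
 * s_count counts passive references: holders only need the struct
 * super_block itself not to be freed, and it is destroy_super()'d when
 * s_count drops to zero.  In effect an active reference also keeps the
 * structure alive.  See grab_super() and grab_super_passive() below
 * for how each kind of reference is taken.
 */
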
/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 *      put_super       -       drop a temporary reference to superblock
 *      @sb: superblock in question
 *
 *      Drops a temporary reference, frees superblock if there are no
 *      references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}


/**
 *      deactivate_locked_super -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Drops an active reference to superblock, converting it into a temporary
 *      one if there are no other active references left.  In that case we
 *      tell fs driver to shut it down and drop the temporary reference we
 *      had just acquired.
 *
 *      Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                fs->kill_sb(s);

                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);

                /*
                 * We need to call rcu_barrier so all the delayed rcu free
                 * inodes are flushed before we release the fs module.
                 */
                rcu_barrier();
                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *      deactivate_super        -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Variant of deactivate_locked_super(), except that superblock is *not*
 *      locked by caller.  If we are going to drop the final active reference,
 *      lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 *      grab_super - acquire an active reference
 *      @s: reference we are trying to make active
 *
 *      Tries to acquire an active reference.  grab_super() is used when we
 *      had just found a superblock in super_blocks or fs_type->fs_supers
 *      and want to turn it into a full-blown active reference.  grab_super()
 *      is called with sb_lock held and drops it.  Returns 1 in case of
 *      success, 0 on failure (the superblock was already dead or dying
 *      when grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        if (atomic_inc_not_zero(&s->s_active)) {
                spin_unlock(&sb_lock);
                return 1;
        }
        /* it's going away */
        s->s_count++;
        spin_unlock(&sb_lock);
        /* wait for it to die */
        down_write(&s->s_umount);
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 *      grab_super_passive - acquire a passive reference
 *      @sb: reference we are trying to grab
 *
 *      Tries to acquire a passive reference. This is used in places where we
 *      cannot take an active reference but we need to ensure that the
 *      superblock does not go away while we are working on it. It returns
 *      false if a reference was not gained, and returns true with the s_umount
 *      lock held in read mode if a reference is gained. On successful return,
 *      the caller must drop the s_umount lock and the passive reference when
 *      done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}
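
/*
 * Typical grab_super_passive() usage, as in prune_super() above
 * (a minimal sketch, not a real call site):
 *
 *      if (!grab_super_passive(sb))
 *              return -1;      (sb is dead, dying, or s_umount contended)
 *      ...inspect or prune sb with s_umount held for read...
 *      drop_super(sb);         (up_read + put_super in one call)
 */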

/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block *sb)
{
        mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block *sb)
{
        mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);

/**
 *      generic_shutdown_super  -       common helper for ->kill_sb()
 *      @sb: superblock to kill
 *
 *      generic_shutdown_super() does all fs-independent work on superblock
 *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *      that need destruction out of superblock, call generic_shutdown_super()
 *      and release aforementioned objects.  Note: dentries and inodes _are_
 *      taken care of and do not need specific handling.
 *
 *      Upon calling this function, the filesystem may no longer alter or
 *      rearrange the set of dentries belonging to this super_block, nor may it
 *      change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* re-initialize so hlist_unhashed() checks see this sb as dead */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 *      sget    -       find or create a superblock
 *      @type:  filesystem type superblock should belong to
 *      @test:  comparison callback
 *      @set:   setup callback
 *      @flags: mount flags
 *      @data:  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags,
                        void *data)
{
        struct super_block *s = NULL;
        struct hlist_node *node;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        down_write(&old->s_umount);
                        if (unlikely(!(old->s_flags & MS_BORN))) {
                                deactivate_locked_super(old);
                                goto retry;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}

EXPORT_SYMBOL(sget);
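
/*
 * The usual sget() calling pattern; ns_test_super()/ns_set_super()
 * below are a real minimal pair (this is a sketch only, the "foo"
 * names are made up):
 *
 *      static int test_foo(struct super_block *s, void *data)
 *      {
 *              return s->s_fs_info == data;
 *      }
 *      static int set_foo(struct super_block *s, void *data)
 *      {
 *              s->s_fs_info = data;
 *              return 0;
 *      }
 *      ...
 *      s = sget(fs_type, test_foo, set_foo, flags, cookie);
 *
 * @test runs under sb_lock against every existing superblock of @type;
 * @set initializes a freshly allocated one, also under sb_lock.  On
 * success the sb is returned with s_umount held for write and an
 * active reference; if an existing sb matched, s->s_root is already
 * set and the caller skips filling it in (see mount_ns() below).
 */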

void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *      iterate_supers - call function for all active superblocks
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls given function, passing it
 *      locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
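
/*
 * Example callback, much like the one fs/sync.c uses for sys_sync
 * (a sketch; the name is made up):
 *
 *      static void sync_one_sb(struct super_block *sb, void *arg)
 *      {
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      sync_filesystem(sb);
 *      }
 *      ...
 *      iterate_supers(sync_one_sb, NULL);
 *
 * The callback runs with s_umount held for read and a passive
 * reference held, so the sb cannot be freed under it.  Note the
 * deferred __put_super(p) above: the reference on the current sb
 * keeps our list cursor valid while sb_lock is dropped, and the
 * previous sb is only put once the lock has been retaken.
 */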

/**
 *      iterate_supers_type - call function for superblocks of given type
 *      @type: fs type
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls given function, passing it
 *      locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;
        struct hlist_node *node;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *      get_super - get the superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

EXPORT_SYMBOL(get_super);

/**
 *      get_super_thawed - get thawed superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device. The superblock is returned once it is thawed
 *      (or immediately if it was not frozen). %NULL is returned if no match
 *      is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (grab_super(sb)) /* drops sb_lock */
                                return sb;
                        else
                                goto restart;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 *      do_remount_sb - asks filesystem to change mount options.
 *      @sb:    superblock in question
 *      @flags: numeric part of options
 *      @data:  the rest of options
 *      @force: whether or not to force the change
 *
 *      Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        if (flags & MS_RDONLY)
                acct_auto_close(sb);
        shrink_dcache_sb(sb);
        sync_filesystem(sb);

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        mark_files_ro(sb);
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock); /* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

 retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
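
/*
 * get_anon_bdev() above follows the classic two-step IDA protocol:
 * ida_pre_get() preallocates memory outside the spinlock, then
 * ida_get_new_above() does the actual allocation under the lock and
 * returns -EAGAIN if the preallocated memory was consumed by a racing
 * allocator, in which case we simply retry.  unnamed_dev_start is just
 * a hint that keeps the search from rescanning the densely used low
 * minors on every allocation.
 */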

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}
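
/*
 * MS_BORN, set above once ->mount() has returned successfully, is what
 * the scanners in this file (iterate_supers(), get_super(), sget(),
 * ...) test together with sb->s_root before touching a superblock: it
 * marks the point after which the sb is fully set up and may safely be
 * used by others.
 */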

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to instrument it properly. Getting freeze protection
 * works like taking a read lock, but there are subtle problems. XFS for
 * example gets freeze protection on internal level twice in some cases,
 * which is OK only because we already hold a freeze protection also on a
 * higher level. Due to these cases we have to tell lockdep we are doing
 * trylock when we already hold a freeze protection for a higher freeze
 * level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
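
/*
 * Typical use via the wrappers from linux/fs.h (sketch of a write
 * path):
 *
 *      sb_start_write(sb);     expands to __sb_start_write(sb, SB_FREEZE_WRITE, true)
 *      ...modify the filesystem...
 *      sb_end_write(sb);       expands to __sb_end_write(sb, SB_FREEZE_WRITE)
 *
 * and similarly sb_start_pagefault()/sb_end_pagefault() around page
 * fault handlers and sb_start_intwrite()/sb_end_intwrite() around
 * modifications done by internal fs threads.
 */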

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle through lockdep here so that it does not complain
         * about returning to userspace with a lock held
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}
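
/*
 * Why the barriers around frozen vs. the counters matter (an informal
 * two-CPU sketch; see also the comments in __sb_start_write() and
 * __sb_end_write()):
 *
 *      freezer (freeze_super)          writer (__sb_start_write)
 *      ----------------------          -------------------------
 *      frozen = SB_FREEZE_WRITE        percpu_counter_inc(counter)
 *      smp_wmb()                       smp_mb()
 *      read counter sum                re-check frozen
 *
 * Whatever the interleaving, either the freezer sees the writer's
 * counter increment and waits for it in sb_wait_write(), or the
 * writer sees frozen set, backs out its increment and blocks, so no
 * writer can slip past a freeze unnoticed.
 */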

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS: Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
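
/*
 * freeze_super() is what the FIFREEZE ioctl and freeze_bdev() end up
 * calling, and thaw_super() below is the FITHAW side.  From userspace,
 * assuming a filesystem that implements ->freeze_fs (sketch):
 *
 *      int fd = open("/mnt", O_RDONLY);
 *      ioctl(fd, FIFREEZE, 0);         stops writes, syncs, ->freeze_fs()
 *      ...take a block-level snapshot of the device...
 *      ioctl(fd, FITHAW, 0);           ->unfreeze_fs(), wakes writers
 */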

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);