linux/fs/fs-writeback.c
   1/*
   2 * fs/fs-writeback.c
   3 *
   4 * Copyright (C) 2002, Linus Torvalds.
   5 *
   6 * Contains all the functions related to writing back and waiting
   7 * upon dirty inodes against superblocks, and writing back dirty
   8 * pages against inodes.  ie: data writeback.  Writeout of the
   9 * inode itself is not handled here.
  10 *
  11 * 10Apr2002    Andrew Morton
  12 *              Split out of fs/inode.c
  13 *              Additions for address_space-based writeback
  14 */
  15
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/spinlock.h>
  19#include <linux/sched.h>
  20#include <linux/fs.h>
  21#include <linux/mm.h>
  22#include <linux/writeback.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25#include <linux/buffer_head.h>
  26#include "internal.h"
  27
  28
  29/**
  30 * writeback_acquire - attempt to get exclusive writeback access to a device
  31 * @bdi: the device's backing_dev_info structure
  32 *
  33 * It is a waste of resources to have more than one pdflush thread blocked on
  34 * a single request queue.  Exclusion at the request_queue level is obtained
  35 * via a flag in the request_queue's backing_dev_info.state.
  36 *
  37 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
   38 * unless they implement their own, which is somewhat inefficient, as this
  39 * may prevent concurrent writeback against multiple devices.
  40 */
  41static int writeback_acquire(struct backing_dev_info *bdi)
  42{
  43        return !test_and_set_bit(BDI_pdflush, &bdi->state);
  44}
  45
  46/**
  47 * writeback_in_progress - determine whether there is writeback in progress
  48 * @bdi: the device's backing_dev_info structure.
  49 *
  50 * Determine whether there is writeback in progress against a backing device.
  51 */
  52int writeback_in_progress(struct backing_dev_info *bdi)
  53{
  54        return test_bit(BDI_pdflush, &bdi->state);
  55}
  56
  57/**
  58 * writeback_release - relinquish exclusive writeback access against a device.
  59 * @bdi: the device's backing_dev_info structure
  60 */
  61static void writeback_release(struct backing_dev_info *bdi)
  62{
  63        BUG_ON(!writeback_in_progress(bdi));
  64        clear_bit(BDI_pdflush, &bdi->state);
  65}
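
/*
 * Illustrative sketch: how a pdflush-style worker might pair
 * writeback_acquire() with writeback_release().  The helper name and the
 * writeback step in the middle are hypothetical; only the acquire/release
 * calls come from the functions above.  A real caller would also point
 * wbc->bdi at this queue.
 */
static void example_flush_one_bdi(struct backing_dev_info *bdi,
				  struct writeback_control *wbc)
{
	if (!writeback_acquire(bdi))
		return;		/* another pdflush thread already owns this queue */

	writeback_inodes(wbc);	/* push dirty data at this backing device */

	writeback_release(bdi);
}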
  66
  67/**
  68 *      __mark_inode_dirty -    internal function
  69 *      @inode: inode to mark
   70 *      @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
  71 *      Mark an inode as dirty. Callers should use mark_inode_dirty or
  72 *      mark_inode_dirty_sync.
  73 *
  74 * Put the inode on the super block's dirty list.
  75 *
  76 * CAREFUL! We mark it dirty unconditionally, but move it onto the
  77 * dirty list only if it is hashed or if it refers to a blockdev.
  78 * If it was not hashed, it will never be added to the dirty list
  79 * even if it is later hashed, as it will have been marked dirty already.
  80 *
  81 * In short, make sure you hash any inodes _before_ you start marking
  82 * them dirty.
  83 *
  84 * This function *must* be atomic for the I_DIRTY_PAGES case -
  85 * set_page_dirty() is called under spinlock in several places.
  86 *
  87 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
  88 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
  89 * the kernel-internal blockdev inode represents the dirtying time of the
  90 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
  91 * page->mapping->host, so the page-dirtying time is recorded in the internal
  92 * blockdev inode.
  93 */
  94void __mark_inode_dirty(struct inode *inode, int flags)
  95{
  96        struct super_block *sb = inode->i_sb;
  97
  98        /*
  99         * Don't do this for I_DIRTY_PAGES - that doesn't actually
 100         * dirty the inode itself
 101         */
 102        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 103                if (sb->s_op->dirty_inode)
 104                        sb->s_op->dirty_inode(inode);
 105        }
 106
 107        /*
 108         * make sure that changes are seen by all cpus before we test i_state
 109         * -- mikulas
 110         */
 111        smp_mb();
 112
 113        /* avoid the locking if we can */
 114        if ((inode->i_state & flags) == flags)
 115                return;
 116
 117        if (unlikely(block_dump)) {
 118                struct dentry *dentry = NULL;
 119                const char *name = "?";
 120
 121                if (!list_empty(&inode->i_dentry)) {
 122                        dentry = list_entry(inode->i_dentry.next,
 123                                            struct dentry, d_alias);
 124                        if (dentry && dentry->d_name.name)
 125                                name = (const char *) dentry->d_name.name;
 126                }
 127
 128                if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
 129                        printk(KERN_DEBUG
 130                               "%s(%d): dirtied inode %lu (%s) on %s\n",
 131                               current->comm, task_pid_nr(current), inode->i_ino,
 132                               name, inode->i_sb->s_id);
 133        }
 134
 135        spin_lock(&inode_lock);
 136        if ((inode->i_state & flags) != flags) {
 137                const int was_dirty = inode->i_state & I_DIRTY;
 138
 139                inode->i_state |= flags;
 140
 141                /*
 142                 * If the inode is being synced, just update its dirty state.
 143                 * The unlocker will place the inode on the appropriate
 144                 * superblock list, based upon its state.
 145                 */
 146                if (inode->i_state & I_SYNC)
 147                        goto out;
 148
 149                /*
 150                 * Only add valid (hashed) inodes to the superblock's
 151                 * dirty list.  Add blockdev inodes as well.
 152                 */
 153                if (!S_ISBLK(inode->i_mode)) {
 154                        if (hlist_unhashed(&inode->i_hash))
 155                                goto out;
 156                }
 157                if (inode->i_state & (I_FREEING|I_CLEAR))
 158                        goto out;
 159
 160                /*
 161                 * If the inode was already on s_dirty/s_io/s_more_io, don't
 162                 * reposition it (that would break s_dirty time-ordering).
 163                 */
 164                if (!was_dirty) {
 165                        inode->dirtied_when = jiffies;
 166                        list_move(&inode->i_list, &sb->s_dirty);
 167                }
 168        }
 169out:
 170        spin_unlock(&inode_lock);
 171}
 172
 173EXPORT_SYMBOL(__mark_inode_dirty);
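
/*
 * For reference, the convenience wrappers most callers use are defined
 * (roughly as below) in include/linux/fs.h; they just pick the dirty flags
 * and call __mark_inode_dirty():
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */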
 174
 175static int write_inode(struct inode *inode, int sync)
 176{
 177        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
 178                return inode->i_sb->s_op->write_inode(inode, sync);
 179        return 0;
 180}
 181
 182/*
  183 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 184 * furthest end of its superblock's dirty-inode list.
 185 *
 186 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 187 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 188 * the case then the inode must have been redirtied while it was being written
 189 * out and we don't reset its dirtied_when.
 190 */
 191static void redirty_tail(struct inode *inode)
 192{
 193        struct super_block *sb = inode->i_sb;
 194
 195        if (!list_empty(&sb->s_dirty)) {
 196                struct inode *tail_inode;
 197
 198                tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
 199                if (!time_after_eq(inode->dirtied_when,
 200                                tail_inode->dirtied_when))
 201                        inode->dirtied_when = jiffies;
 202        }
 203        list_move(&inode->i_list, &sb->s_dirty);
 204}
 205
 206/*
  207 * Requeue the inode for re-scanning after the sb->s_io list is exhausted.
 208 */
 209static void requeue_io(struct inode *inode)
 210{
 211        list_move(&inode->i_list, &inode->i_sb->s_more_io);
 212}
 213
 214static void inode_sync_complete(struct inode *inode)
 215{
 216        /*
 217         * Prevent speculative execution through spin_unlock(&inode_lock);
 218         */
 219        smp_mb();
 220        wake_up_bit(&inode->i_state, __I_SYNC);
 221}
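
/*
 * The sleeping side of this handshake is inode_sync_wait() (roughly as
 * below, from include/linux/fs.h): waiters block on the same __I_SYNC bit
 * that inode_sync_complete() wakes.
 *
 *	static inline void inode_sync_wait(struct inode *inode)
 *	{
 *		might_sleep();
 *		wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
 *							TASK_UNINTERRUPTIBLE);
 *	}
 */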
 222
 223/*
 224 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 225 */
 226static void move_expired_inodes(struct list_head *delaying_queue,
 227                               struct list_head *dispatch_queue,
 228                                unsigned long *older_than_this)
 229{
 230        while (!list_empty(delaying_queue)) {
 231                struct inode *inode = list_entry(delaying_queue->prev,
 232                                                struct inode, i_list);
 233                if (older_than_this &&
 234                        time_after(inode->dirtied_when, *older_than_this))
 235                        break;
 236                list_move(&inode->i_list, dispatch_queue);
 237        }
 238}
 239
 240/*
 241 * Queue all expired dirty inodes for io, eldest first.
 242 */
 243static void queue_io(struct super_block *sb,
 244                                unsigned long *older_than_this)
 245{
 246        list_splice_init(&sb->s_more_io, sb->s_io.prev);
 247        move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
 248}
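
/*
 * Illustrative sketch (hypothetical helper): a kupdate-style caller selects
 * only inodes that have been dirty longer than some expiry interval by
 * passing a cutoff timestamp.  The 30-second value is just an example, and
 * the caller must hold inode_lock, as queue_io() expects.
 */
static void example_queue_expired(struct super_block *sb)
{
	unsigned long oldest_jif = jiffies - 30 * HZ;	/* example cutoff */

	queue_io(sb, &oldest_jif);	/* queues inodes dirtied before the cutoff */
}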
 249
 250int sb_has_dirty_inodes(struct super_block *sb)
 251{
 252        return !list_empty(&sb->s_dirty) ||
 253               !list_empty(&sb->s_io) ||
 254               !list_empty(&sb->s_more_io);
 255}
 256EXPORT_SYMBOL(sb_has_dirty_inodes);
 257
 258/*
 259 * Write a single inode's dirty pages and inode data out to disk.
 260 * If `wait' is set, wait on the writeout.
 261 *
 262 * The whole writeout design is quite complex and fragile.  We want to avoid
 263 * starvation of particular inodes when others are being redirtied, prevent
 264 * livelocks, etc.
 265 *
 266 * Called under inode_lock.
 267 */
 268static int
 269__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
 270{
 271        unsigned dirty;
 272        struct address_space *mapping = inode->i_mapping;
 273        int wait = wbc->sync_mode == WB_SYNC_ALL;
 274        int ret;
 275
 276        BUG_ON(inode->i_state & I_SYNC);
 277
 278        /* Set I_SYNC, reset I_DIRTY */
 279        dirty = inode->i_state & I_DIRTY;
 280        inode->i_state |= I_SYNC;
 281        inode->i_state &= ~I_DIRTY;
 282
 283        spin_unlock(&inode_lock);
 284
 285        ret = do_writepages(mapping, wbc);
 286
 287        /* Don't write the inode if only I_DIRTY_PAGES was set */
 288        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 289                int err = write_inode(inode, wait);
 290                if (ret == 0)
 291                        ret = err;
 292        }
 293
 294        if (wait) {
 295                int err = filemap_fdatawait(mapping);
 296                if (ret == 0)
 297                        ret = err;
 298        }
 299
 300        spin_lock(&inode_lock);
 301        inode->i_state &= ~I_SYNC;
 302        if (!(inode->i_state & I_FREEING)) {
 303                if (!(inode->i_state & I_DIRTY) &&
 304                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 305                        /*
 306                         * We didn't write back all the pages.  nfs_writepages()
  307                         * sometimes bails out without doing anything.  Redirty
  308                         * the inode; move it from s_io onto s_more_io/s_dirty.
 309                         */
 310                        /*
 311                         * akpm: if the caller was the kupdate function we put
 312                         * this inode at the head of s_dirty so it gets first
 313                         * consideration.  Otherwise, move it to the tail, for
 314                         * the reasons described there.  I'm not really sure
  315                         * how much sense this makes.  Presumably I had a good
  316                         * reason for doing it this way, and I'd rather not
 317                         * muck with it at present.
 318                         */
 319                        if (wbc->for_kupdate) {
 320                                /*
 321                                 * For the kupdate function we move the inode
 322                                 * to s_more_io so it will get more writeout as
 323                                 * soon as the queue becomes uncongested.
 324                                 */
 325                                inode->i_state |= I_DIRTY_PAGES;
 326                                if (wbc->nr_to_write <= 0) {
 327                                        /*
 328                                         * slice used up: queue for next turn
 329                                         */
 330                                        requeue_io(inode);
 331                                } else {
 332                                        /*
 333                                         * somehow blocked: retry later
 334                                         */
 335                                        redirty_tail(inode);
 336                                }
 337                        } else {
 338                                /*
 339                                 * Otherwise fully redirty the inode so that
 340                                 * other inodes on this superblock will get some
  341                                 * writeout; otherwise heavy writing to one
 342                                 * file would indefinitely suspend writeout of
 343                                 * all the other files.
 344                                 */
 345                                inode->i_state |= I_DIRTY_PAGES;
 346                                redirty_tail(inode);
 347                        }
 348                } else if (inode->i_state & I_DIRTY) {
 349                        /*
  350                         * Someone redirtied the inode while we were writing back
 351                         * the pages.
 352                         */
 353                        redirty_tail(inode);
 354                } else if (atomic_read(&inode->i_count)) {
 355                        /*
  356                         * The inode is clean, in use
 357                         */
 358                        list_move(&inode->i_list, &inode_in_use);
 359                } else {
 360                        /*
 361                         * The inode is clean, unused
 362                         */
 363                        list_move(&inode->i_list, &inode_unused);
 364                }
 365        }
 366        inode_sync_complete(inode);
 367        return ret;
 368}
 369
 370/*
 371 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
  372 * caller has a ref on the inode (either via __iget or via syscall against an fd)
 373 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 374 */
 375static int
 376__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 377{
 378        wait_queue_head_t *wqh;
 379
 380        if (!atomic_read(&inode->i_count))
 381                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
 382        else
 383                WARN_ON(inode->i_state & I_WILL_FREE);
 384
 385        if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
 386                /*
 387                 * We're skipping this inode because it's locked, and we're not
 388                 * doing writeback-for-data-integrity.  Move it to s_more_io so
 389                 * that writeback can proceed with the other inodes on s_io.
 390                 * We'll have another go at writing back this inode when we
  391                 * have completed a full scan of s_io.
 392                 */
 393                requeue_io(inode);
 394                return 0;
 395        }
 396
 397        /*
 398         * It's a data-integrity sync.  We must wait.
 399         */
 400        if (inode->i_state & I_SYNC) {
 401                DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
 402
 403                wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
 404                do {
 405                        spin_unlock(&inode_lock);
 406                        __wait_on_bit(wqh, &wq, inode_wait,
 407                                                        TASK_UNINTERRUPTIBLE);
 408                        spin_lock(&inode_lock);
 409                } while (inode->i_state & I_SYNC);
 410        }
 411        return __sync_single_inode(inode, wbc);
 412}
 413
 414/*
 415 * Write out a superblock's list of dirty inodes.  A wait will be performed
 416 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 417 *
 418 * If older_than_this is non-NULL, then only write out inodes which
 419 * had their first dirtying at a time earlier than *older_than_this.
 420 *
  421 * If we're a pdflush thread, then implement pdflush collision avoidance
 422 * against the entire list.
 423 *
 424 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 425 * that it can be located for waiting on in __writeback_single_inode().
 426 *
  427 * If `bdi' is non-zero then we're being asked to write back a specific queue.
 428 * This function assumes that the blockdev superblock's inodes are backed by
 429 * a variety of queues, so all inodes are searched.  For other superblocks,
 430 * assume that all inodes are backed by the same queue.
 431 *
  432 * FIXME: this linear search could get expensive with many filesystems.  But
 433 * how to fix?  We need to go from an address_space to all inodes which share
 434 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 435 * list).
 436 *
 437 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 438 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 439 * on the writer throttling path, and we get decent balancing between many
 440 * throttled threads: we don't want them all piling up on inode_sync_wait.
 441 */
 442void generic_sync_sb_inodes(struct super_block *sb,
 443                                struct writeback_control *wbc)
 444{
 445        const unsigned long start = jiffies;    /* livelock avoidance */
 446
 447        spin_lock(&inode_lock);
 448        if (!wbc->for_kupdate || list_empty(&sb->s_io))
 449                queue_io(sb, wbc->older_than_this);
 450
 451        while (!list_empty(&sb->s_io)) {
 452                struct inode *inode = list_entry(sb->s_io.prev,
 453                                                struct inode, i_list);
 454                struct address_space *mapping = inode->i_mapping;
 455                struct backing_dev_info *bdi = mapping->backing_dev_info;
 456                long pages_skipped;
 457
 458                if (!bdi_cap_writeback_dirty(bdi)) {
 459                        redirty_tail(inode);
 460                        if (sb_is_blkdev_sb(sb)) {
 461                                /*
 462                                 * Dirty memory-backed blockdev: the ramdisk
 463                                 * driver does this.  Skip just this inode
 464                                 */
 465                                continue;
 466                        }
 467                        /*
 468                         * Dirty memory-backed inode against a filesystem other
 469                         * than the kernel-internal bdev filesystem.  Skip the
 470                         * entire superblock.
 471                         */
 472                        break;
 473                }
 474
 475                if (wbc->nonblocking && bdi_write_congested(bdi)) {
 476                        wbc->encountered_congestion = 1;
 477                        if (!sb_is_blkdev_sb(sb))
 478                                break;          /* Skip a congested fs */
 479                        requeue_io(inode);
 480                        continue;               /* Skip a congested blockdev */
 481                }
 482
 483                if (wbc->bdi && bdi != wbc->bdi) {
 484                        if (!sb_is_blkdev_sb(sb))
 485                                break;          /* fs has the wrong queue */
 486                        requeue_io(inode);
 487                        continue;               /* blockdev has wrong queue */
 488                }
 489
 490                /* Was this inode dirtied after sync_sb_inodes was called? */
 491                if (time_after(inode->dirtied_when, start))
 492                        break;
 493
 494                /* Is another pdflush already flushing this queue? */
 495                if (current_is_pdflush() && !writeback_acquire(bdi))
 496                        break;
 497
 498                BUG_ON(inode->i_state & I_FREEING);
 499                __iget(inode);
 500                pages_skipped = wbc->pages_skipped;
 501                __writeback_single_inode(inode, wbc);
 502                if (wbc->sync_mode == WB_SYNC_HOLD) {
 503                        inode->dirtied_when = jiffies;
 504                        list_move(&inode->i_list, &sb->s_dirty);
 505                }
 506                if (current_is_pdflush())
 507                        writeback_release(bdi);
 508                if (wbc->pages_skipped != pages_skipped) {
 509                        /*
 510                         * writeback is not making progress due to locked
 511                         * buffers.  Skip this inode for now.
 512                         */
 513                        redirty_tail(inode);
 514                }
 515                spin_unlock(&inode_lock);
 516                iput(inode);
 517                cond_resched();
 518                spin_lock(&inode_lock);
 519                if (wbc->nr_to_write <= 0) {
 520                        wbc->more_io = 1;
 521                        break;
 522                }
 523                if (!list_empty(&sb->s_more_io))
 524                        wbc->more_io = 1;
 525        }
 526        spin_unlock(&inode_lock);
 527        return;         /* Leave any unwritten inodes on s_io */
 528}
 529EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
 530
 531static void sync_sb_inodes(struct super_block *sb,
 532                                struct writeback_control *wbc)
 533{
 534        generic_sync_sb_inodes(sb, wbc);
 535}
 536
 537/*
 538 * Start writeback of dirty pagecache data against all unlocked inodes.
 539 *
 540 * Note:
  541 * We don't need to grab a reference to the superblock here. If it has a
  542 * non-empty ->s_dirty list it hasn't been killed yet, and kill_super() won't
  543 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are
  544 * all empty. Since __sync_single_inode() regains inode_lock before it finally
  545 * moves the inode off the superblock's lists, we are OK.
 546 *
 547 * If `older_than_this' is non-zero then only flush inodes which have a
 548 * flushtime older than *older_than_this.
 549 *
 550 * If `bdi' is non-zero then we will scan the first inode against each
 551 * superblock until we find the matching ones.  One group will be the dirty
 552 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
  553 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 554 * super-efficient but we're about to do a ton of I/O...
 555 */
 556void
 557writeback_inodes(struct writeback_control *wbc)
 558{
 559        struct super_block *sb;
 560
 561        might_sleep();
 562        spin_lock(&sb_lock);
 563restart:
 564        list_for_each_entry_reverse(sb, &super_blocks, s_list) {
 565                if (sb_has_dirty_inodes(sb)) {
 566                        /* we're making our own get_super here */
 567                        sb->s_count++;
 568                        spin_unlock(&sb_lock);
 569                        /*
 570                         * If we can't get the readlock, there's no sense in
  571                         * waiting around; most of the time the FS is going to
 572                         * be unmounted by the time it is released.
 573                         */
 574                        if (down_read_trylock(&sb->s_umount)) {
 575                                if (sb->s_root)
 576                                        sync_sb_inodes(sb, wbc);
 577                                up_read(&sb->s_umount);
 578                        }
 579                        spin_lock(&sb_lock);
 580                        if (__put_super_and_need_restart(sb))
 581                                goto restart;
 582                }
 583                if (wbc->nr_to_write <= 0)
 584                        break;
 585        }
 586        spin_unlock(&sb_lock);
 587}
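
/*
 * Illustrative sketch (hypothetical caller): background flushers drive
 * writeback_inodes() in fixed-size chunks with a non-blocking
 * writeback_control, stopping once a pass fails to use its full budget.
 * The field values and chunk size below are examples, not taken from the
 * real pdflush callbacks.
 */
static void example_background_writeback(void)
{
	struct writeback_control wbc = {
		.bdi		= NULL,		/* all queues */
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nonblocking	= 1,		/* skip congested queues */
	};

	for (;;) {
		wbc.nr_to_write = 1024;		/* example chunk size */
		wbc.encountered_congestion = 0;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0)	/* budget not used up: little left to do */
			break;
	}
}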
 588
 589/*
  590 * Write back and wait upon the filesystem's dirty inodes.  The caller will
 591 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 592 * used to park the written inodes on sb->s_dirty for the wait pass.
 593 *
  594 * A finite limit is set on the number of pages which will be written,
  595 * to prevent infinite livelock of sys_sync().
 596 *
 597 * We add in the number of potentially dirty inodes, because each inode write
 598 * can dirty pagecache in the underlying blockdev.
 599 */
 600void sync_inodes_sb(struct super_block *sb, int wait)
 601{
 602        struct writeback_control wbc = {
 603                .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
 604                .range_start    = 0,
 605                .range_end      = LLONG_MAX,
 606        };
 607        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 608        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 609
 610        wbc.nr_to_write = nr_dirty + nr_unstable +
 611                        (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
 612                        nr_dirty + nr_unstable;
 613        wbc.nr_to_write += wbc.nr_to_write / 2;         /* Bit more for luck */
 614        sync_sb_inodes(sb, &wbc);
 615}
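
/*
 * Worked example for the sizing above (illustrative numbers only): with
 * nr_dirty = 1000, nr_unstable = 0 and 500 in-use inodes, the initial budget
 * is 1000 + 0 + 500 + 1000 + 0 = 2500 pages, and after the "bit more for
 * luck" adjustment nr_to_write = 2500 + 2500/2 = 3750.
 */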
 616
 617/*
 618 * Rather lame livelock avoidance.
 619 */
 620static void set_sb_syncing(int val)
 621{
 622        struct super_block *sb;
 623        spin_lock(&sb_lock);
 624        list_for_each_entry_reverse(sb, &super_blocks, s_list)
 625                sb->s_syncing = val;
 626        spin_unlock(&sb_lock);
 627}
 628
 629/**
 630 * sync_inodes - writes all inodes to disk
 631 * @wait: wait for completion
 632 *
 633 * sync_inodes() goes through each super block's dirty inode list, writes the
 634 * inodes out, waits on the writeout and puts the inodes back on the normal
 635 * list.
 636 *
 637 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 638 * part of the sync functions is that the blockdev "superblock" is processed
 639 * last.  This is because the write_inode() function of a typical fs will
 640 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 641 * What we want to do is to perform all that dirtying first, and then write
 642 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 643 * additional (somewhat redundant) sync_blockdev() calls here are to make
 644 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 645 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 646 * filesystem's write_inode().  This is extremely slow.
 647 */
 648static void __sync_inodes(int wait)
 649{
 650        struct super_block *sb;
 651
 652        spin_lock(&sb_lock);
 653restart:
 654        list_for_each_entry(sb, &super_blocks, s_list) {
 655                if (sb->s_syncing)
 656                        continue;
 657                sb->s_syncing = 1;
 658                sb->s_count++;
 659                spin_unlock(&sb_lock);
 660                down_read(&sb->s_umount);
 661                if (sb->s_root) {
 662                        sync_inodes_sb(sb, wait);
 663                        sync_blockdev(sb->s_bdev);
 664                }
 665                up_read(&sb->s_umount);
 666                spin_lock(&sb_lock);
 667                if (__put_super_and_need_restart(sb))
 668                        goto restart;
 669        }
 670        spin_unlock(&sb_lock);
 671}
 672
 673void sync_inodes(int wait)
 674{
 675        set_sb_syncing(0);
 676        __sync_inodes(0);
 677
 678        if (wait) {
 679                set_sb_syncing(0);
 680                __sync_inodes(1);
 681        }
 682}
 683
 684/**
 685 * write_inode_now      -       write an inode to disk
 686 * @inode: inode to write to disk
 687 * @sync: whether the write should be synchronous or not
 688 *
 689 * This function commits an inode to disk immediately if it is dirty. This is
 690 * primarily needed by knfsd.
 691 *
 692 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 693 */
 694int write_inode_now(struct inode *inode, int sync)
 695{
 696        int ret;
 697        struct writeback_control wbc = {
 698                .nr_to_write = LONG_MAX,
 699                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
 700                .range_start = 0,
 701                .range_end = LLONG_MAX,
 702        };
 703
 704        if (!mapping_cap_writeback_dirty(inode->i_mapping))
 705                wbc.nr_to_write = 0;
 706
 707        might_sleep();
 708        spin_lock(&inode_lock);
 709        ret = __writeback_single_inode(inode, &wbc);
 710        spin_unlock(&inode_lock);
 711        if (sync)
 712                inode_sync_wait(inode);
 713        return ret;
 714}
 715EXPORT_SYMBOL(write_inode_now);
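
/*
 * Illustrative sketch (hypothetical caller): forcing a single dirty inode to
 * disk synchronously, in the way an nfsd-style user of write_inode_now()
 * would.  The wrapper name is made up.
 */
static int example_commit_inode(struct inode *inode)
{
	return write_inode_now(inode, 1);	/* write and wait on completion */
}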
 716
 717/**
 718 * sync_inode - write an inode and its pages to disk.
 719 * @inode: the inode to sync
 720 * @wbc: controls the writeback mode
 721 *
 722 * sync_inode() will write an inode and its pages to disk.  It will also
 723 * correctly update the inode on its superblock's dirty inode lists and will
 724 * update inode->i_state.
 725 *
 726 * The caller must have a ref on the inode.
 727 */
 728int sync_inode(struct inode *inode, struct writeback_control *wbc)
 729{
 730        int ret;
 731
 732        spin_lock(&inode_lock);
 733        ret = __writeback_single_inode(inode, wbc);
 734        spin_unlock(&inode_lock);
 735        return ret;
 736}
 737EXPORT_SYMBOL(sync_inode);
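
/*
 * Illustrative sketch (hypothetical caller): a filesystem's fsync path could
 * drive sync_inode() with a data-integrity writeback_control like this.  The
 * helper name is made up; the fields are the ones consumed above.
 */
static int example_fsync_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* write and wait */
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}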
 738
 739/**
 740 * generic_osync_inode - flush all dirty data for a given inode to disk
 741 * @inode: inode to write
 742 * @mapping: the address_space that should be flushed
 743 * @what:  what to write and wait upon
 744 *
 745 * This can be called by file_write functions for files which have the
 746 * O_SYNC flag set, to flush dirty writes to disk.
 747 *
 748 * @what is a bitmask, specifying which part of the inode's data should be
 749 * written and waited upon.
 750 *
 751 *    OSYNC_DATA:     i_mapping's dirty data
 752 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 753 *    OSYNC_INODE:    the inode itself
 754 */
 755
 756int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
 757{
 758        int err = 0;
 759        int need_write_inode_now = 0;
 760        int err2;
 761
 762        if (what & OSYNC_DATA)
 763                err = filemap_fdatawrite(mapping);
 764        if (what & (OSYNC_METADATA|OSYNC_DATA)) {
 765                err2 = sync_mapping_buffers(mapping);
 766                if (!err)
 767                        err = err2;
 768        }
 769        if (what & OSYNC_DATA) {
 770                err2 = filemap_fdatawait(mapping);
 771                if (!err)
 772                        err = err2;
 773        }
 774
 775        spin_lock(&inode_lock);
 776        if ((inode->i_state & I_DIRTY) &&
 777            ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
 778                need_write_inode_now = 1;
 779        spin_unlock(&inode_lock);
 780
 781        if (need_write_inode_now) {
 782                err2 = write_inode_now(inode, 1);
 783                if (!err)
 784                        err = err2;
 785        }
 786        else
 787                inode_sync_wait(inode);
 788
 789        return err;
 790}
 791EXPORT_SYMBOL(generic_osync_inode);
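
/*
 * Illustrative sketch (hypothetical caller): after an O_SYNC write, the
 * write path can flush and wait on both data and metadata as described
 * above.  The surrounding function is made up; the flag usage follows the
 * kernel-doc.
 */
static int example_osync_after_write(struct file *file)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	return generic_osync_inode(inode, mapping,
				   OSYNC_METADATA | OSYNC_DATA);
}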
 792