linux/fs/buffer.c
   1/*
   2 *  linux/fs/buffer.c
   3 *
   4 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
   5 */
   6
   7/*
   8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
   9 *
  10 * Removed a lot of unnecessary code and simplified things now that
  11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
  12 *
  13 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
  14 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
  15 *
  16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
  17 *
  18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  19 */
  20
  21#include <linux/kernel.h>
  22#include <linux/syscalls.h>
  23#include <linux/fs.h>
  24#include <linux/mm.h>
  25#include <linux/percpu.h>
  26#include <linux/slab.h>
  27#include <linux/capability.h>
  28#include <linux/blkdev.h>
  29#include <linux/file.h>
  30#include <linux/quotaops.h>
  31#include <linux/highmem.h>
  32#include <linux/export.h>
  33#include <linux/writeback.h>
  34#include <linux/hash.h>
  35#include <linux/suspend.h>
  36#include <linux/buffer_head.h>
  37#include <linux/task_io_accounting_ops.h>
  38#include <linux/bio.h>
  39#include <linux/notifier.h>
  40#include <linux/cpu.h>
  41#include <linux/bitops.h>
  42#include <linux/mpage.h>
  43#include <linux/bit_spinlock.h>
  44#include <trace/events/block.h>
  45
  46static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
  47
  48#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
  49
  50void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
  51{
  52        bh->b_end_io = handler;
  53        bh->b_private = private;
  54}
  55EXPORT_SYMBOL(init_buffer);
  56
  57inline void touch_buffer(struct buffer_head *bh)
  58{
  59        trace_block_touch_buffer(bh);
  60        mark_page_accessed(bh->b_page);
  61}
  62EXPORT_SYMBOL(touch_buffer);
  63
  64static int sleep_on_buffer(void *word)
  65{
  66        io_schedule();
  67        return 0;
  68}
  69
  70void __lock_buffer(struct buffer_head *bh)
  71{
  72        wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
  73                                                        TASK_UNINTERRUPTIBLE);
  74}
  75EXPORT_SYMBOL(__lock_buffer);
  76
  77void unlock_buffer(struct buffer_head *bh)
  78{
  79        clear_bit_unlock(BH_Lock, &bh->b_state);
  80        smp_mb__after_clear_bit();
  81        wake_up_bit(&bh->b_state, BH_Lock);
  82}
  83EXPORT_SYMBOL(unlock_buffer);
  84
  85/*
  86 * Block until a buffer comes unlocked.  This doesn't stop it
  87 * from becoming locked again - you have to lock it yourself
  88 * if you want to preserve its state.
  89 */
  90void __wait_on_buffer(struct buffer_head * bh)
  91{
  92        wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
  93}
  94EXPORT_SYMBOL(__wait_on_buffer);
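
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * as the comment above says, wait_on_buffer() only waits - the buffer can be
 * locked again the moment it returns.  A caller that needs the buffer to stay
 * in a known state takes BH_Lock itself via lock_buffer():
 */
static void example_wait_then_lock(struct buffer_head *bh)
{
        wait_on_buffer(bh);     /* new I/O may relock bh right after this */

        lock_buffer(bh);        /* hold BH_Lock ourselves instead */
        /* ... e.g. check buffer_uptodate(bh) before starting new I/O ... */
        unlock_buffer(bh);
}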
  95
  96static void
  97__clear_page_buffers(struct page *page)
  98{
  99        ClearPagePrivate(page);
 100        set_page_private(page, 0);
 101        page_cache_release(page);
 102}
 103
 104
 105static int quiet_error(struct buffer_head *bh)
 106{
 107        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
 108                return 0;
 109        return 1;
 110}
 111
 112
 113static void buffer_io_error(struct buffer_head *bh)
 114{
 115        char b[BDEVNAME_SIZE];
 116        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
 117                        bdevname(bh->b_bdev, b),
 118                        (unsigned long long)bh->b_blocknr);
 119}
 120
 121/*
 122 * End-of-IO handler helper function which does not touch the bh after
 123 * unlocking it.
 124 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 125 * a race there is benign: unlock_buffer() only uses the bh's address for
 126 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 127 * itself.
 128 */
 129static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 130{
 131        if (uptodate) {
 132                set_buffer_uptodate(bh);
 133        } else {
 134                /* This happens, due to failed READA attempts. */
 135                clear_buffer_uptodate(bh);
 136        }
 137        unlock_buffer(bh);
 138}
 139
 140/*
 141 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 142 * unlock the buffer. This is what ll_rw_block uses too.
 143 */
 144void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 145{
 146        __end_buffer_read_notouch(bh, uptodate);
 147        put_bh(bh);
 148}
 149EXPORT_SYMBOL(end_buffer_read_sync);
 150
 151void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 152{
 153        char b[BDEVNAME_SIZE];
 154
 155        if (uptodate) {
 156                set_buffer_uptodate(bh);
 157        } else {
 158                if (!quiet_error(bh)) {
 159                        buffer_io_error(bh);
 160                        printk(KERN_WARNING "lost page write due to "
 161                                        "I/O error on %s\n",
 162                                       bdevname(bh->b_bdev, b));
 163                }
 164                set_buffer_write_io_error(bh);
 165                clear_buffer_uptodate(bh);
 166        }
 167        unlock_buffer(bh);
 168        put_bh(bh);
 169}
 170EXPORT_SYMBOL(end_buffer_write_sync);
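
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * end_buffer_write_sync() is installed as bh->b_end_io before submission,
 * roughly the pattern sync_dirty_buffer() uses further down in this file.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);     /* reference dropped by end_buffer_write_sync() */
                bh->b_end_io = end_buffer_write_sync;
                submit_bh(WRITE, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        return -EIO;
        } else {
                unlock_buffer(bh);
        }
        return 0;
}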
 171
 172/*
 173 * Various filesystems appear to want __find_get_block to be non-blocking.
 174 * But it's the page lock which protects the buffers.  To get around this,
 175 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 176 * private_lock.
 177 *
 178 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 179 * may be quite high.  This code could TryLock the page, and if that
 180 * succeeds, there is no need to take private_lock. (But if
 181 * private_lock is contended then so is mapping->tree_lock).
 182 */
 183static struct buffer_head *
 184__find_get_block_slow(struct block_device *bdev, sector_t block)
 185{
 186        struct inode *bd_inode = bdev->bd_inode;
 187        struct address_space *bd_mapping = bd_inode->i_mapping;
 188        struct buffer_head *ret = NULL;
 189        pgoff_t index;
 190        struct buffer_head *bh;
 191        struct buffer_head *head;
 192        struct page *page;
 193        int all_mapped = 1;
 194
 195        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
 196        page = find_get_page(bd_mapping, index);
 197        if (!page)
 198                goto out;
 199
 200        spin_lock(&bd_mapping->private_lock);
 201        if (!page_has_buffers(page))
 202                goto out_unlock;
 203        head = page_buffers(page);
 204        bh = head;
 205        do {
 206                if (!buffer_mapped(bh))
 207                        all_mapped = 0;
 208                else if (bh->b_blocknr == block) {
 209                        ret = bh;
 210                        get_bh(bh);
 211                        goto out_unlock;
 212                }
 213                bh = bh->b_this_page;
 214        } while (bh != head);
 215
 216        /* we might be here because some of the buffers on this page are
 217         * not mapped.  This is due to various races between
 218         * file io on the block device and getblk.  It gets dealt with
 219         * elsewhere, don't buffer_error if we had some unmapped buffers
 220         */
 221        if (all_mapped) {
 222                char b[BDEVNAME_SIZE];
 223
 224                printk("__find_get_block_slow() failed. "
 225                        "block=%llu, b_blocknr=%llu\n",
 226                        (unsigned long long)block,
 227                        (unsigned long long)bh->b_blocknr);
 228                printk("b_state=0x%08lx, b_size=%zu\n",
 229                        bh->b_state, bh->b_size);
 230                printk("device %s blocksize: %d\n", bdevname(bdev, b),
 231                        1 << bd_inode->i_blkbits);
 232        }
 233out_unlock:
 234        spin_unlock(&bd_mapping->private_lock);
 235        page_cache_release(page);
 236out:
 237        return ret;
 238}
 239
 240/*
 241 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 242 */
 243static void free_more_memory(void)
 244{
 245        struct zone *zone;
 246        int nid;
 247
 248        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
 249        yield();
 250
 251        for_each_online_node(nid) {
 252                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
 253                                                gfp_zone(GFP_NOFS), NULL,
 254                                                &zone);
 255                if (zone)
 256                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
 257                                                GFP_NOFS, NULL);
 258        }
 259}
 260
 261/*
 262 * I/O completion handler for block_read_full_page() - pages
 263 * which come unlocked at the end of I/O.
 264 */
 265static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 266{
 267        unsigned long flags;
 268        struct buffer_head *first;
 269        struct buffer_head *tmp;
 270        struct page *page;
 271        int page_uptodate = 1;
 272
 273        BUG_ON(!buffer_async_read(bh));
 274
 275        page = bh->b_page;
 276        if (uptodate) {
 277                set_buffer_uptodate(bh);
 278        } else {
 279                clear_buffer_uptodate(bh);
 280                if (!quiet_error(bh))
 281                        buffer_io_error(bh);
 282                SetPageError(page);
 283        }
 284
 285        /*
 286         * Be _very_ careful from here on. Bad things can happen if
 287         * two buffer heads end IO at almost the same time and both
 288         * decide that the page is now completely done.
 289         */
 290        first = page_buffers(page);
 291        local_irq_save(flags);
 292        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 293        clear_buffer_async_read(bh);
 294        unlock_buffer(bh);
 295        tmp = bh;
 296        do {
 297                if (!buffer_uptodate(tmp))
 298                        page_uptodate = 0;
 299                if (buffer_async_read(tmp)) {
 300                        BUG_ON(!buffer_locked(tmp));
 301                        goto still_busy;
 302                }
 303                tmp = tmp->b_this_page;
 304        } while (tmp != bh);
 305        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 306        local_irq_restore(flags);
 307
 308        /*
 309         * If none of the buffers had errors and they are all
 310         * uptodate then we can set the page uptodate.
 311         */
 312        if (page_uptodate && !PageError(page))
 313                SetPageUptodate(page);
 314        unlock_page(page);
 315        return;
 316
 317still_busy:
 318        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 319        local_irq_restore(flags);
 320        return;
 321}
 322
 323/*
 324 * Completion handler for block_write_full_page() - pages which are unlocked
 325 * during I/O, and which have PageWriteback cleared upon I/O completion.
 326 */
 327void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 328{
 329        char b[BDEVNAME_SIZE];
 330        unsigned long flags;
 331        struct buffer_head *first;
 332        struct buffer_head *tmp;
 333        struct page *page;
 334
 335        BUG_ON(!buffer_async_write(bh));
 336
 337        page = bh->b_page;
 338        if (uptodate) {
 339                set_buffer_uptodate(bh);
 340        } else {
 341                if (!quiet_error(bh)) {
 342                        buffer_io_error(bh);
 343                        printk(KERN_WARNING "lost page write due to "
 344                                        "I/O error on %s\n",
 345                               bdevname(bh->b_bdev, b));
 346                }
 347                set_bit(AS_EIO, &page->mapping->flags);
 348                set_buffer_write_io_error(bh);
 349                clear_buffer_uptodate(bh);
 350                SetPageError(page);
 351        }
 352
 353        first = page_buffers(page);
 354        local_irq_save(flags);
 355        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 356
 357        clear_buffer_async_write(bh);
 358        unlock_buffer(bh);
 359        tmp = bh->b_this_page;
 360        while (tmp != bh) {
 361                if (buffer_async_write(tmp)) {
 362                        BUG_ON(!buffer_locked(tmp));
 363                        goto still_busy;
 364                }
 365                tmp = tmp->b_this_page;
 366        }
 367        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 368        local_irq_restore(flags);
 369        end_page_writeback(page);
 370        return;
 371
 372still_busy:
 373        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 374        local_irq_restore(flags);
 375        return;
 376}
 377EXPORT_SYMBOL(end_buffer_async_write);
 378
 379/*
 380 * If a page's buffers are under async read-in (end_buffer_async_read
 381 * completion) then there is a possibility that another thread of
 382 * control could lock one of the buffers after it has completed
 383 * but while some of the other buffers have not completed.  This
 384 * locked buffer would confuse end_buffer_async_read() into not unlocking
 385 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 386 * that this buffer is not under async I/O.
 387 *
 388 * The page comes unlocked when it has no locked buffer_async buffers
 389 * left.
 390 *
 391 * PageLocked prevents anyone from starting new async I/O against any of
 392 * the buffers.
 393 *
 394 * PageWriteback is used to prevent simultaneous writeout of the same
 395 * page.
 396 *
 397 * PageLocked prevents anyone from starting writeback of a page which is
 398 * under read I/O (PageWriteback is only ever set against a locked page).
 399 */
 400static void mark_buffer_async_read(struct buffer_head *bh)
 401{
 402        bh->b_end_io = end_buffer_async_read;
 403        set_buffer_async_read(bh);
 404}
 405
 406static void mark_buffer_async_write_endio(struct buffer_head *bh,
 407                                          bh_end_io_t *handler)
 408{
 409        bh->b_end_io = handler;
 410        set_buffer_async_write(bh);
 411}
 412
 413void mark_buffer_async_write(struct buffer_head *bh)
 414{
 415        mark_buffer_async_write_endio(bh, end_buffer_async_write);
 416}
 417EXPORT_SYMBOL(mark_buffer_async_write);
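
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * writepage-style path marks every buffer it intends to write *before* the
 * first submit_bh(), so that end_buffer_async_write() only ends page
 * writeback once the last marked buffer completes.  Error handling and the
 * "nothing was marked" case are omitted; see __block_write_full_page()
 * later in this file for the real thing.
 */
static void example_submit_page_buffers(struct page *page)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;

        do {            /* pass 1: mark the buffers we will write */
                lock_buffer(bh);
                if (buffer_mapped(bh) && test_clear_buffer_dirty(bh))
                        mark_buffer_async_write(bh);
                else
                        unlock_buffer(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);

        do {            /* pass 2: submit the marked buffers */
                if (buffer_async_write(bh))
                        submit_bh(WRITE, bh);
                bh = bh->b_this_page;
        } while (bh != head);
}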
 418
 419
 420/*
 421 * fs/buffer.c contains helper functions for buffer-backed address space's
 422 * fsync functions.  A common requirement for buffer-based filesystems is
 423 * that certain data from the backing blockdev needs to be written out for
 424 * a successful fsync().  For example, ext2 indirect blocks need to be
 425 * written back and waited upon before fsync() returns.
 426 *
 427 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 428 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 429 * management of a list of dependent buffers at ->i_mapping->private_list.
 430 *
 431 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 432 * from their controlling inode's queue when they are being freed.  But
 433 * try_to_free_buffers() will be operating against the *blockdev* mapping
 434 * at the time, not against the S_ISREG file which depends on those buffers.
 435 * So the locking for private_list is via the private_lock in the address_space
 436 * which backs the buffers, which is different from the address_space
 437 * against which the buffers are listed.  So for a particular address_space,
 438 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 439 * mapping->private_list will always be protected by the backing blockdev's
 440 * ->private_lock.
 441 *
 442 * Which introduces a requirement: all buffers on an address_space's
 443 * ->private_list must be from the same address_space: the blockdev's.
 444 *
 445 * address_spaces which do not place buffers at ->private_list via these
 446 * utility functions are free to use private_lock and private_list for
 447 * whatever they want.  The only requirement is that list_empty(private_list)
 448 * be true at clear_inode() time.
 449 *
 450 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 451 * filesystems should do that.  invalidate_inode_buffers() should just go
 452 * BUG_ON(!list_empty).
 453 *
 454 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 455 * take an address_space, not an inode.  And it should be called
 456 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 457 * queued up.
 458 *
 459 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 460 * list if it is already on a list.  Because if the buffer is on a list,
 461 * it *must* already be on the right one.  If not, the filesystem is being
 462 * silly.  This will save a ton of locking.  But first we have to ensure
 463 * that buffers are taken *off* the old inode's list when they are freed
 464 * (presumably in truncate).  That requires careful auditing of all
 465 * filesystems (do it inside bforget()).  It could also be done by bringing
 466 * b_inode back.
 467 */
 468
 469/*
 470 * The buffer's backing address_space's private_lock must be held
 471 */
 472static void __remove_assoc_queue(struct buffer_head *bh)
 473{
 474        list_del_init(&bh->b_assoc_buffers);
 475        WARN_ON(!bh->b_assoc_map);
 476        if (buffer_write_io_error(bh))
 477                set_bit(AS_EIO, &bh->b_assoc_map->flags);
 478        bh->b_assoc_map = NULL;
 479}
 480
 481int inode_has_buffers(struct inode *inode)
 482{
 483        return !list_empty(&inode->i_data.private_list);
 484}
 485
 486/*
 487 * osync is designed to support O_SYNC io.  It waits synchronously for
 488 * all already-submitted IO to complete, but does not queue any new
 489 * writes to the disk.
 490 *
 491 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 492 * you dirty the buffers, and then use osync_inode_buffers to wait for
 493 * completion.  Any other dirty buffers which are not yet queued for
 494 * write will not be flushed to disk by the osync.
 495 */
 496static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 497{
 498        struct buffer_head *bh;
 499        struct list_head *p;
 500        int err = 0;
 501
 502        spin_lock(lock);
 503repeat:
 504        list_for_each_prev(p, list) {
 505                bh = BH_ENTRY(p);
 506                if (buffer_locked(bh)) {
 507                        get_bh(bh);
 508                        spin_unlock(lock);
 509                        wait_on_buffer(bh);
 510                        if (!buffer_uptodate(bh))
 511                                err = -EIO;
 512                        brelse(bh);
 513                        spin_lock(lock);
 514                        goto repeat;
 515                }
 516        }
 517        spin_unlock(lock);
 518        return err;
 519}
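
/*
 * Illustrative sketch (hypothetical, not part of the original file) of the
 * O_SYNC pattern described above: the write is queued with ll_rw_block() as
 * soon as the buffer is dirtied, and completion is waited for afterwards.
 */
static int example_osync_one(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);
        ll_rw_block(WRITE, 1, &bh);     /* queue the write now */

        /* ... dirty and queue any further buffers here ... */

        wait_on_buffer(bh);             /* then wait for what was queued */
        return buffer_uptodate(bh) ? 0 : -EIO;
}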
 520
 521static void do_thaw_one(struct super_block *sb, void *unused)
 522{
 523        char b[BDEVNAME_SIZE];
 524        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
 525                printk(KERN_WARNING "Emergency Thaw on %s\n",
 526                       bdevname(sb->s_bdev, b));
 527}
 528
 529static void do_thaw_all(struct work_struct *work)
 530{
 531        iterate_supers(do_thaw_one, NULL);
 532        kfree(work);
 533        printk(KERN_WARNING "Emergency Thaw complete\n");
 534}
 535
 536/**
 537 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 538 *
 539 * Used for emergency unfreeze of all filesystems via SysRq
 540 */
 541void emergency_thaw_all(void)
 542{
 543        struct work_struct *work;
 544
 545        work = kmalloc(sizeof(*work), GFP_ATOMIC);
 546        if (work) {
 547                INIT_WORK(work, do_thaw_all);
 548                schedule_work(work);
 549        }
 550}
 551
 552/**
 553 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 554 * @mapping: the mapping which wants those buffers written
 555 *
 556 * Starts I/O against the buffers at mapping->private_list, and waits upon
 557 * that I/O.
 558 *
 559 * Basically, this is a convenience function for fsync().
 560 * @mapping is a file or directory which needs those buffers to be written for
 561 * a successful fsync().
 562 */
 563int sync_mapping_buffers(struct address_space *mapping)
 564{
 565        struct address_space *buffer_mapping = mapping->private_data;
 566
 567        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
 568                return 0;
 569
 570        return fsync_buffers_list(&buffer_mapping->private_lock,
 571                                        &mapping->private_list);
 572}
 573EXPORT_SYMBOL(sync_mapping_buffers);
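
/*
 * Illustrative sketch (hypothetical filesystem, not part of the original
 * file): metadata buffers dirtied with mark_buffer_dirty_inode() end up on
 * ->private_list, and an ext2-style ->fsync can flush them like this:
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct address_space *mapping = file->f_mapping;
        int err, ret;

        err = filemap_write_and_wait_range(mapping, start, end);
        ret = sync_mapping_buffers(mapping);    /* indirect blocks etc. */
        return err ? err : ret;
}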
 574
 575/*
 576 * Called when we've recently written block `bblock', and it is known that
 577 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 578 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 579 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 580 */
 581void write_boundary_block(struct block_device *bdev,
 582                        sector_t bblock, unsigned blocksize)
 583{
 584        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 585        if (bh) {
 586                if (buffer_dirty(bh))
 587                        ll_rw_block(WRITE, 1, &bh);
 588                put_bh(bh);
 589        }
 590}
 591
 592void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 593{
 594        struct address_space *mapping = inode->i_mapping;
 595        struct address_space *buffer_mapping = bh->b_page->mapping;
 596
 597        mark_buffer_dirty(bh);
 598        if (!mapping->private_data) {
 599                mapping->private_data = buffer_mapping;
 600        } else {
 601                BUG_ON(mapping->private_data != buffer_mapping);
 602        }
 603        if (!bh->b_assoc_map) {
 604                spin_lock(&buffer_mapping->private_lock);
 605                list_move_tail(&bh->b_assoc_buffers,
 606                                &mapping->private_list);
 607                bh->b_assoc_map = mapping;
 608                spin_unlock(&buffer_mapping->private_lock);
 609        }
 610}
 611EXPORT_SYMBOL(mark_buffer_dirty_inode);
 612
 613/*
 614 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 615 * dirty.
 616 *
 617 * If warn is true, then emit a warning if the page is not uptodate and has
 618 * not been truncated.
 619 */
 620static void __set_page_dirty(struct page *page,
 621                struct address_space *mapping, int warn)
 622{
 623        spin_lock_irq(&mapping->tree_lock);
 624        if (page->mapping) {    /* Race with truncate? */
 625                WARN_ON_ONCE(warn && !PageUptodate(page));
 626                account_page_dirtied(page, mapping);
 627                radix_tree_tag_set(&mapping->page_tree,
 628                                page_index(page), PAGECACHE_TAG_DIRTY);
 629        }
 630        spin_unlock_irq(&mapping->tree_lock);
 631        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 632}
 633
 634/*
 635 * Add a page to the dirty page list.
 636 *
 637 * It is a sad fact of life that this function is called from several places
 638 * deeply under spinlocking.  It may not sleep.
 639 *
 640 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 641 * dirty-state coherency between the page and the buffers.  If the page does
 642 * not have buffers then when they are later attached they will all be set
 643 * dirty.
 644 *
 645 * The buffers are dirtied before the page is dirtied.  There's a small race
 646 * window in which a writepage caller may see the page cleanness but not the
 647 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 648 * before the buffers, a concurrent writepage caller could clear the page dirty
 649 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 650 * page on the dirty page list.
 651 *
 652 * We use private_lock to lock against try_to_free_buffers while using the
 653 * page's buffer list.  Also use this to protect against clean buffers being
 654 * added to the page after it was set dirty.
 655 *
 656 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 657 * address_space though.
 658 */
 659int __set_page_dirty_buffers(struct page *page)
 660{
 661        int newly_dirty;
 662        struct address_space *mapping = page_mapping(page);
 663
 664        if (unlikely(!mapping))
 665                return !TestSetPageDirty(page);
 666
 667        spin_lock(&mapping->private_lock);
 668        if (page_has_buffers(page)) {
 669                struct buffer_head *head = page_buffers(page);
 670                struct buffer_head *bh = head;
 671
 672                do {
 673                        set_buffer_dirty(bh);
 674                        bh = bh->b_this_page;
 675                } while (bh != head);
 676        }
 677        newly_dirty = !TestSetPageDirty(page);
 678        spin_unlock(&mapping->private_lock);
 679
 680        if (newly_dirty)
 681                __set_page_dirty(page, mapping, 1);
 682        return newly_dirty;
 683}
 684EXPORT_SYMBOL(__set_page_dirty_buffers);
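
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * buffer-backed filesystems typically wire this helper straight into their
 * address_space_operations:
 */
static const struct address_space_operations example_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        /* .readpage, .writepage, ... supplied by the real filesystem */
};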
 685
 686/*
 687 * Write out and wait upon a list of buffers.
 688 *
 689 * We have conflicting pressures: we want to make sure that all
 690 * initially dirty buffers get waited on, but that any subsequently
 691 * dirtied buffers don't.  After all, we don't want fsync to last
 692 * forever if somebody is actively writing to the file.
 693 *
 694 * Do this in two main stages: first we copy dirty buffers to a
 695 * temporary inode list, queueing the writes as we go.  Then we clean
 696 * up, waiting for those writes to complete.
 697 * 
 698 * During this second stage, any subsequent updates to the file may end
 699 * up refiling the buffer on the original inode's dirty list again, so
 700 * there is a chance we will end up with a buffer queued for write but
 701 * not yet completed on that list.  So, as a final cleanup we go through
 702 * the osync code to catch these locked, dirty buffers without requeuing
 703 * any newly dirty buffers for write.
 704 */
 705static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 706{
 707        struct buffer_head *bh;
 708        struct list_head tmp;
 709        struct address_space *mapping;
 710        int err = 0, err2;
 711        struct blk_plug plug;
 712
 713        INIT_LIST_HEAD(&tmp);
 714        blk_start_plug(&plug);
 715
 716        spin_lock(lock);
 717        while (!list_empty(list)) {
 718                bh = BH_ENTRY(list->next);
 719                mapping = bh->b_assoc_map;
 720                __remove_assoc_queue(bh);
 721                /* Avoid race with mark_buffer_dirty_inode() which does
 722                 * a lockless check and we rely on seeing the dirty bit */
 723                smp_mb();
 724                if (buffer_dirty(bh) || buffer_locked(bh)) {
 725                        list_add(&bh->b_assoc_buffers, &tmp);
 726                        bh->b_assoc_map = mapping;
 727                        if (buffer_dirty(bh)) {
 728                                get_bh(bh);
 729                                spin_unlock(lock);
 730                                /*
 731                                 * Ensure any pending I/O completes so that
 732                                 * write_dirty_buffer() actually writes the
 733                                 * current contents - it is a noop if I/O is
 734                                 * still in flight on potentially older
 735                                 * contents.
 736                                 */
 737                                write_dirty_buffer(bh, WRITE_SYNC);
 738
 739                                /*
 740                                 * Kick off IO for the previous mapping. Note
 741                                 * that we will not run the very last mapping,
 742                                 * wait_on_buffer() will do that for us
 743                                 * through sync_buffer().
 744                                 */
 745                                brelse(bh);
 746                                spin_lock(lock);
 747                        }
 748                }
 749        }
 750
 751        spin_unlock(lock);
 752        blk_finish_plug(&plug);
 753        spin_lock(lock);
 754
 755        while (!list_empty(&tmp)) {
 756                bh = BH_ENTRY(tmp.prev);
 757                get_bh(bh);
 758                mapping = bh->b_assoc_map;
 759                __remove_assoc_queue(bh);
 760                /* Avoid race with mark_buffer_dirty_inode() which does
 761                 * a lockless check and we rely on seeing the dirty bit */
 762                smp_mb();
 763                if (buffer_dirty(bh)) {
 764                        list_add(&bh->b_assoc_buffers,
 765                                 &mapping->private_list);
 766                        bh->b_assoc_map = mapping;
 767                }
 768                spin_unlock(lock);
 769                wait_on_buffer(bh);
 770                if (!buffer_uptodate(bh))
 771                        err = -EIO;
 772                brelse(bh);
 773                spin_lock(lock);
 774        }
 775        
 776        spin_unlock(lock);
 777        err2 = osync_buffers_list(lock, list);
 778        if (err)
 779                return err;
 780        else
 781                return err2;
 782}
 783
 784/*
 785 * Invalidate any and all dirty buffers on a given inode.  We are
 786 * probably unmounting the fs, but that doesn't mean we have already
 787 * done a sync().  Just drop the buffers from the inode list.
 788 *
 789 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 790 * assumes that all the buffers are against the blockdev.  Not true
 791 * for reiserfs.
 792 */
 793void invalidate_inode_buffers(struct inode *inode)
 794{
 795        if (inode_has_buffers(inode)) {
 796                struct address_space *mapping = &inode->i_data;
 797                struct list_head *list = &mapping->private_list;
 798                struct address_space *buffer_mapping = mapping->private_data;
 799
 800                spin_lock(&buffer_mapping->private_lock);
 801                while (!list_empty(list))
 802                        __remove_assoc_queue(BH_ENTRY(list->next));
 803                spin_unlock(&buffer_mapping->private_lock);
 804        }
 805}
 806EXPORT_SYMBOL(invalidate_inode_buffers);
 807
 808/*
 809 * Remove any clean buffers from the inode's buffer list.  This is called
 810 * when we're trying to free the inode itself.  Those buffers can pin it.
 811 *
 812 * Returns true if all buffers were removed.
 813 */
 814int remove_inode_buffers(struct inode *inode)
 815{
 816        int ret = 1;
 817
 818        if (inode_has_buffers(inode)) {
 819                struct address_space *mapping = &inode->i_data;
 820                struct list_head *list = &mapping->private_list;
 821                struct address_space *buffer_mapping = mapping->private_data;
 822
 823                spin_lock(&buffer_mapping->private_lock);
 824                while (!list_empty(list)) {
 825                        struct buffer_head *bh = BH_ENTRY(list->next);
 826                        if (buffer_dirty(bh)) {
 827                                ret = 0;
 828                                break;
 829                        }
 830                        __remove_assoc_queue(bh);
 831                }
 832                spin_unlock(&buffer_mapping->private_lock);
 833        }
 834        return ret;
 835}
 836
 837/*
 838 * Create the appropriate buffers when given a page for data area and
 839 * the size of each buffer.. Use the bh->b_this_page linked list to
 840 * follow the buffers created.  Return NULL if unable to create more
 841 * buffers.
 842 *
 843 * The retry flag is used to differentiate async IO (paging, swapping)
 844 * which may not fail from ordinary buffer allocations.
 845 */
 846struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 847                int retry)
 848{
 849        struct buffer_head *bh, *head;
 850        long offset;
 851
 852try_again:
 853        head = NULL;
 854        offset = PAGE_SIZE;
 855        while ((offset -= size) >= 0) {
 856                bh = alloc_buffer_head(GFP_NOFS);
 857                if (!bh)
 858                        goto no_grow;
 859
 860                bh->b_this_page = head;
 861                bh->b_blocknr = -1;
 862                head = bh;
 863
 864                bh->b_size = size;
 865
 866                /* Link the buffer to its page */
 867                set_bh_page(bh, page, offset);
 868        }
 869        return head;
 870/*
 871 * In case anything failed, we just free everything we got.
 872 */
 873no_grow:
 874        if (head) {
 875                do {
 876                        bh = head;
 877                        head = head->b_this_page;
 878                        free_buffer_head(bh);
 879                } while (head);
 880        }
 881
 882        /*
 883         * Return failure for non-async IO requests.  Async IO requests
 884         * are not allowed to fail, so we have to wait until buffer heads
 885         * become available.  But we don't want tasks sleeping with 
 886         * partially complete buffers, so all were released above.
 887         */
 888        if (!retry)
 889                return NULL;
 890
 891        /* We're _really_ low on memory. Now we just
 892         * wait for old buffer heads to become free due to
 893         * finishing IO.  Since this is an async request and
 894         * the reserve list is empty, we're sure there are 
 895         * async buffer heads in use.
 896         */
 897        free_more_memory();
 898        goto try_again;
 899}
 900EXPORT_SYMBOL_GPL(alloc_page_buffers);
 901
 902static inline void
 903link_dev_buffers(struct page *page, struct buffer_head *head)
 904{
 905        struct buffer_head *bh, *tail;
 906
 907        bh = head;
 908        do {
 909                tail = bh;
 910                bh = bh->b_this_page;
 911        } while (bh);
 912        tail->b_this_page = head;
 913        attach_page_buffers(page, head);
 914}
 915
 916static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
 917{
 918        sector_t retval = ~((sector_t)0);
 919        loff_t sz = i_size_read(bdev->bd_inode);
 920
 921        if (sz) {
 922                unsigned int sizebits = blksize_bits(size);
 923                retval = (sz >> sizebits);
 924        }
 925        return retval;
 926}
 927
 928/*
 929 * Initialise the state of a blockdev page's buffers.
 930 */ 
 931static sector_t
 932init_page_buffers(struct page *page, struct block_device *bdev,
 933                        sector_t block, int size)
 934{
 935        struct buffer_head *head = page_buffers(page);
 936        struct buffer_head *bh = head;
 937        int uptodate = PageUptodate(page);
 938        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
 939
 940        do {
 941                if (!buffer_mapped(bh)) {
 942                        init_buffer(bh, NULL, NULL);
 943                        bh->b_bdev = bdev;
 944                        bh->b_blocknr = block;
 945                        if (uptodate)
 946                                set_buffer_uptodate(bh);
 947                        if (block < end_block)
 948                                set_buffer_mapped(bh);
 949                }
 950                block++;
 951                bh = bh->b_this_page;
 952        } while (bh != head);
 953
 954        /*
 955         * Caller needs to validate requested block against end of device.
 956         */
 957        return end_block;
 958}
 959
 960/*
 961 * Create the page-cache page that contains the requested block.
 962 *
 963 * This is used purely for blockdev mappings.
 964 */
 965static int
 966grow_dev_page(struct block_device *bdev, sector_t block,
 967                pgoff_t index, int size, int sizebits)
 968{
 969        struct inode *inode = bdev->bd_inode;
 970        struct page *page;
 971        struct buffer_head *bh;
 972        sector_t end_block;
 973        int ret = 0;            /* Will call free_more_memory() */
 974
 975        page = find_or_create_page(inode->i_mapping, index,
 976                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
 977        if (!page)
 978                return ret;
 979
 980        BUG_ON(!PageLocked(page));
 981
 982        if (page_has_buffers(page)) {
 983                bh = page_buffers(page);
 984                if (bh->b_size == size) {
 985                        end_block = init_page_buffers(page, bdev,
 986                                                index << sizebits, size);
 987                        goto done;
 988                }
 989                if (!try_to_free_buffers(page))
 990                        goto failed;
 991        }
 992
 993        /*
 994         * Allocate some buffers for this page
 995         */
 996        bh = alloc_page_buffers(page, size, 0);
 997        if (!bh)
 998                goto failed;
 999
1000        /*
1001         * Link the page to the buffers and initialise them.  Take the
1002         * lock to be atomic wrt __find_get_block(), which does not
1003         * run under the page lock.
1004         */
1005        spin_lock(&inode->i_mapping->private_lock);
1006        link_dev_buffers(page, bh);
1007        end_block = init_page_buffers(page, bdev, index << sizebits, size);
1008        spin_unlock(&inode->i_mapping->private_lock);
1009done:
1010        ret = (block < end_block) ? 1 : -ENXIO;
1011failed:
1012        unlock_page(page);
1013        page_cache_release(page);
1014        return ret;
1015}
1016
1017/*
1018 * Create buffers for the specified block device block's page.  If
1019 * that page was dirty, the buffers are set dirty also.
1020 */
1021static int
1022grow_buffers(struct block_device *bdev, sector_t block, int size)
1023{
1024        pgoff_t index;
1025        int sizebits;
1026
1027        sizebits = -1;
1028        do {
1029                sizebits++;
1030        } while ((size << sizebits) < PAGE_SIZE);
1031
1032        index = block >> sizebits;
1033
1034        /*
1035         * Check for a block which wants to lie outside our maximum possible
1036         * pagecache index.  (this comparison is done using sector_t types).
1037         */
1038        if (unlikely(index != block >> sizebits)) {
1039                char b[BDEVNAME_SIZE];
1040
1041                printk(KERN_ERR "%s: requested out-of-range block %llu for "
1042                        "device %s\n",
1043                        __func__, (unsigned long long)block,
1044                        bdevname(bdev, b));
1045                return -EIO;
1046        }
1047
1048        /* Create a page with the proper size buffers.. */
1049        return grow_dev_page(bdev, block, index, size, sizebits);
1050}
1051
1052static struct buffer_head *
1053__getblk_slow(struct block_device *bdev, sector_t block, int size)
1054{
1055        /* Size must be multiple of hard sectorsize */
1056        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1057                        (size < 512 || size > PAGE_SIZE))) {
1058                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1059                                        size);
1060                printk(KERN_ERR "logical block size: %d\n",
1061                                        bdev_logical_block_size(bdev));
1062
1063                dump_stack();
1064                return NULL;
1065        }
1066
1067        for (;;) {
1068                struct buffer_head *bh;
1069                int ret;
1070
1071                bh = __find_get_block(bdev, block, size);
1072                if (bh)
1073                        return bh;
1074
1075                ret = grow_buffers(bdev, block, size);
1076                if (ret < 0)
1077                        return NULL;
1078                if (ret == 0)
1079                        free_more_memory();
1080        }
1081}
1082
1083/*
1084 * The relationship between dirty buffers and dirty pages:
1085 *
1086 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1087 * the page is tagged dirty in its radix tree.
1088 *
1089 * At all times, the dirtiness of the buffers represents the dirtiness of
1090 * subsections of the page.  If the page has buffers, the page dirty bit is
1091 * merely a hint about the true dirty state.
1092 *
1093 * When a page is set dirty in its entirety, all its buffers are marked dirty
1094 * (if the page has buffers).
1095 *
1096 * When a buffer is marked dirty, its page is dirtied, but the page's other
1097 * buffers are not.
1098 *
1099 * Also.  When blockdev buffers are explicitly read with bread(), they
1100 * individually become uptodate.  But their backing page remains not
1101 * uptodate - even if all of its buffers are uptodate.  A subsequent
1102 * block_read_full_page() against that page will discover all the uptodate
1103 * buffers, will set the page uptodate and will perform no I/O.
1104 */
1105
1106/**
1107 * mark_buffer_dirty - mark a buffer_head as needing writeout
1108 * @bh: the buffer_head to mark dirty
1109 *
1110 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1111 * backing page dirty, then tag the page as dirty in its address_space's radix
1112 * tree and then attach the address_space's inode to its superblock's dirty
1113 * inode list.
1114 *
1115 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1116 * mapping->tree_lock and mapping->host->i_lock.
1117 */
1118void mark_buffer_dirty(struct buffer_head *bh)
1119{
1120        WARN_ON_ONCE(!buffer_uptodate(bh));
1121
1122        trace_block_dirty_buffer(bh);
1123
1124        /*
1125         * Very *carefully* optimize the it-is-already-dirty case.
1126         *
1127         * Don't let the final "is it dirty" escape to before we
1128         * perhaps modified the buffer.
1129         */
1130        if (buffer_dirty(bh)) {
1131                smp_mb();
1132                if (buffer_dirty(bh))
1133                        return;
1134        }
1135
1136        if (!test_set_buffer_dirty(bh)) {
1137                struct page *page = bh->b_page;
1138                if (!TestSetPageDirty(page)) {
1139                        struct address_space *mapping = page_mapping(page);
1140                        if (mapping)
1141                                __set_page_dirty(page, mapping, 0);
1142                }
1143        }
1144}
1145EXPORT_SYMBOL(mark_buffer_dirty);
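
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * usual read-modify-write cycle for an on-disk metadata block.
 * sync_dirty_buffer() (defined later in this file) is only needed when the
 * caller must know the write reached the device before returning.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
                                const void *data, size_t len, int wait)
{
        struct buffer_head *bh = sb_bread(sb, blocknr);
        int err = 0;

        if (!bh)
                return -EIO;
        memcpy(bh->b_data, data, len);  /* assumes len <= bh->b_size */
        mark_buffer_dirty(bh);
        if (wait)
                err = sync_dirty_buffer(bh);
        brelse(bh);
        return err;
}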
1146
1147/*
1148 * Decrement a buffer_head's reference count.  If all buffers against a page
1149 * have zero reference count, are clean and unlocked, and if the page is clean
1150 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1151 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1152 * a page but it ends up not being freed, and buffers may later be reattached).
1153 */
1154void __brelse(struct buffer_head * buf)
1155{
1156        if (atomic_read(&buf->b_count)) {
1157                put_bh(buf);
1158                return;
1159        }
1160        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1161}
1162EXPORT_SYMBOL(__brelse);
1163
1164/*
1165 * bforget() is like brelse(), except it discards any
1166 * potentially dirty data.
1167 */
1168void __bforget(struct buffer_head *bh)
1169{
1170        clear_buffer_dirty(bh);
1171        if (bh->b_assoc_map) {
1172                struct address_space *buffer_mapping = bh->b_page->mapping;
1173
1174                spin_lock(&buffer_mapping->private_lock);
1175                list_del_init(&bh->b_assoc_buffers);
1176                bh->b_assoc_map = NULL;
1177                spin_unlock(&buffer_mapping->private_lock);
1178        }
1179        __brelse(bh);
1180}
1181EXPORT_SYMBOL(__bforget);
1182
1183static struct buffer_head *__bread_slow(struct buffer_head *bh)
1184{
1185        lock_buffer(bh);
1186        if (buffer_uptodate(bh)) {
1187                unlock_buffer(bh);
1188                return bh;
1189        } else {
1190                get_bh(bh);
1191                bh->b_end_io = end_buffer_read_sync;
1192                submit_bh(READ, bh);
1193                wait_on_buffer(bh);
1194                if (buffer_uptodate(bh))
1195                        return bh;
1196        }
1197        brelse(bh);
1198        return NULL;
1199}
1200
1201/*
1202 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1203 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1204 * refcount elevated by one when they're in an LRU.  A buffer can only appear
1205 * once in a particular CPU's LRU.  A single buffer can be present in multiple
1206 * CPU's LRUs at the same time.
1207 *
1208 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1209 * sb_find_get_block().
1210 *
1211 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1212 * a local interrupt disable for that.
1213 */
1214
1215#define BH_LRU_SIZE     8
1216
1217struct bh_lru {
1218        struct buffer_head *bhs[BH_LRU_SIZE];
1219};
1220
1221static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1222
1223#ifdef CONFIG_SMP
1224#define bh_lru_lock()   local_irq_disable()
1225#define bh_lru_unlock() local_irq_enable()
1226#else
1227#define bh_lru_lock()   preempt_disable()
1228#define bh_lru_unlock() preempt_enable()
1229#endif
1230
1231static inline void check_irqs_on(void)
1232{
1233#ifdef irqs_disabled
1234        BUG_ON(irqs_disabled());
1235#endif
1236}
1237
1238/*
1239 * The LRU management algorithm is dopey-but-simple.  Sorry.
1240 */
1241static void bh_lru_install(struct buffer_head *bh)
1242{
1243        struct buffer_head *evictee = NULL;
1244
1245        check_irqs_on();
1246        bh_lru_lock();
1247        if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1248                struct buffer_head *bhs[BH_LRU_SIZE];
1249                int in;
1250                int out = 0;
1251
1252                get_bh(bh);
1253                bhs[out++] = bh;
1254                for (in = 0; in < BH_LRU_SIZE; in++) {
1255                        struct buffer_head *bh2 =
1256                                __this_cpu_read(bh_lrus.bhs[in]);
1257
1258                        if (bh2 == bh) {
1259                                __brelse(bh2);
1260                        } else {
1261                                if (out >= BH_LRU_SIZE) {
1262                                        BUG_ON(evictee != NULL);
1263                                        evictee = bh2;
1264                                } else {
1265                                        bhs[out++] = bh2;
1266                                }
1267                        }
1268                }
1269                while (out < BH_LRU_SIZE)
1270                        bhs[out++] = NULL;
1271                memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1272        }
1273        bh_lru_unlock();
1274
1275        if (evictee)
1276                __brelse(evictee);
1277}
1278
1279/*
1280 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1281 */
1282static struct buffer_head *
1283lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1284{
1285        struct buffer_head *ret = NULL;
1286        unsigned int i;
1287
1288        check_irqs_on();
1289        bh_lru_lock();
1290        for (i = 0; i < BH_LRU_SIZE; i++) {
1291                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1292
1293                if (bh && bh->b_bdev == bdev &&
1294                                bh->b_blocknr == block && bh->b_size == size) {
1295                        if (i) {
1296                                while (i) {
1297                                        __this_cpu_write(bh_lrus.bhs[i],
1298                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
1299                                        i--;
1300                                }
1301                                __this_cpu_write(bh_lrus.bhs[0], bh);
1302                        }
1303                        get_bh(bh);
1304                        ret = bh;
1305                        break;
1306                }
1307        }
1308        bh_lru_unlock();
1309        return ret;
1310}
1311
1312/*
1313 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1314 * it in the LRU and mark it as accessed.  If it is not present then return
1315 * NULL
1316 */
1317struct buffer_head *
1318__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1319{
1320        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1321
1322        if (bh == NULL) {
1323                bh = __find_get_block_slow(bdev, block);
1324                if (bh)
1325                        bh_lru_install(bh);
1326        }
1327        if (bh)
1328                touch_buffer(bh);
1329        return bh;
1330}
1331EXPORT_SYMBOL(__find_get_block);
1332
1333/*
1334 * __getblk will locate (and, if necessary, create) the buffer_head
1335 * which corresponds to the passed block_device, block and size. The
1336 * returned buffer has its reference count incremented.
1337 *
1338 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1339 * attempt is failing.  FIXME, perhaps?
1340 */
1341struct buffer_head *
1342__getblk(struct block_device *bdev, sector_t block, unsigned size)
1343{
1344        struct buffer_head *bh = __find_get_block(bdev, block, size);
1345
1346        might_sleep();
1347        if (bh == NULL)
1348                bh = __getblk_slow(bdev, block, size);
1349        return bh;
1350}
1351EXPORT_SYMBOL(__getblk);
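
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * caller initialising a brand-new block does not read it first - it fills
 * the buffer returned by __getblk() and marks it uptodate itself:
 */
static struct buffer_head *example_init_new_block(struct block_device *bdev,
                                                  sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (!bh)
                return NULL;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* fresh contents, nothing read */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        return bh;                              /* caller brelse()s when done */
}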
1352
1353/*
1354 * Do async read-ahead on a buffer..
1355 */
1356void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1357{
1358        struct buffer_head *bh = __getblk(bdev, block, size);
1359        if (likely(bh)) {
1360                ll_rw_block(READA, 1, &bh);
1361                brelse(bh);
1362        }
1363}
1364EXPORT_SYMBOL(__breadahead);
1365
1366/**
1367 *  __bread() - reads a specified block and returns the bh
1368 *  @bdev: the block_device to read from
1369 *  @block: number of block
1370 *  @size: size (in bytes) to read
1371 * 
1372 *  Reads a specified block, and returns buffer head that contains it.
1373 *  It returns NULL if the block was unreadable.
1374 */
1375struct buffer_head *
1376__bread(struct block_device *bdev, sector_t block, unsigned size)
1377{
1378        struct buffer_head *bh = __getblk(bdev, block, size);
1379
1380        if (likely(bh) && !buffer_uptodate(bh))
1381                bh = __bread_slow(bh);
1382        return bh;
1383}
1384EXPORT_SYMBOL(__bread);
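
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical metadata read.  Filesystems usually reach this through the
 * sb_bread() wrapper, which supplies sb->s_blocksize.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = __bread(sb->s_bdev, blocknr, sb->s_blocksize);

        if (!bh)
                return -EIO;            /* the block could not be read */
        /* ... use bh->b_data ... */
        brelse(bh);
        return 0;
}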
1385
1386/*
1387 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1388 * This doesn't race because it runs in each cpu either in irq
1389 * or with preempt disabled.
1390 */
1391static void invalidate_bh_lru(void *arg)
1392{
1393        struct bh_lru *b = &get_cpu_var(bh_lrus);
1394        int i;
1395
1396        for (i = 0; i < BH_LRU_SIZE; i++) {
1397                brelse(b->bhs[i]);
1398                b->bhs[i] = NULL;
1399        }
1400        put_cpu_var(bh_lrus);
1401}
1402
1403static bool has_bh_in_lru(int cpu, void *dummy)
1404{
1405        struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1406        int i;
1407        
1408        for (i = 0; i < BH_LRU_SIZE; i++) {
1409                if (b->bhs[i])
1410                        return 1;
1411        }
1412
1413        return 0;
1414}
1415
1416void invalidate_bh_lrus(void)
1417{
1418        on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
1419}
1420EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1421
1422void set_bh_page(struct buffer_head *bh,
1423                struct page *page, unsigned long offset)
1424{
1425        bh->b_page = page;
1426        BUG_ON(offset >= PAGE_SIZE);
1427        if (PageHighMem(page))
1428                /*
1429                 * This catches illegal uses and preserves the offset:
1430                 */
1431                bh->b_data = (char *)(0 + offset);
1432        else
1433                bh->b_data = page_address(page) + offset;
1434}
1435EXPORT_SYMBOL(set_bh_page);
1436
1437/*
1438 * Called when truncating a buffer on a page completely.
1439 */
1440static void discard_buffer(struct buffer_head * bh)
1441{
1442        lock_buffer(bh);
1443        clear_buffer_dirty(bh);
1444        bh->b_bdev = NULL;
1445        clear_buffer_mapped(bh);
1446        clear_buffer_req(bh);
1447        clear_buffer_new(bh);
1448        clear_buffer_delay(bh);
1449        clear_buffer_unwritten(bh);
1450        unlock_buffer(bh);
1451}
1452
1453/**
1454 * block_invalidatepage - invalidate part or all of a buffer-backed page
1455 *
1456 * @page: the page which is affected
1457 * @offset: the index of the truncation point
1458 *
1459 * block_invalidatepage() is called when all or part of the page has become
1460 * invalidated by a truncate operation.
1461 *
1462 * block_invalidatepage() does not have to release all buffers, but it must
1463 * ensure that no dirty buffer is left outside @offset and that no I/O
1464 * is underway against any of the blocks which are outside the truncation
1465 * point.  Because the caller is about to free (and possibly reuse) those
1466 * blocks on-disk.
1467 */
1468void block_invalidatepage(struct page *page, unsigned long offset)
1469{
1470        struct buffer_head *head, *bh, *next;
1471        unsigned int curr_off = 0;
1472
1473        BUG_ON(!PageLocked(page));
1474        if (!page_has_buffers(page))
1475                goto out;
1476
1477        head = page_buffers(page);
1478        bh = head;
1479        do {
1480                unsigned int next_off = curr_off + bh->b_size;
1481                next = bh->b_this_page;
1482
1483                /*
1484                 * is this block fully invalidated?
1485                 */
1486                if (offset <= curr_off)
1487                        discard_buffer(bh);
1488                curr_off = next_off;
1489                bh = next;
1490        } while (bh != head);
1491
1492        /*
1493         * We release buffers only if the entire page is being invalidated.
1494         * The get_block cached value has been unconditionally invalidated,
1495         * so real IO is not possible anymore.
1496         */
1497        if (offset == 0)
1498                try_to_release_page(page, 0);
1499out:
1500        return;
1501}
1502EXPORT_SYMBOL(block_invalidatepage);
1503
1504/*
1505 * We attach and possibly dirty the buffers atomically wrt
1506 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1507 * is already excluded via the page lock.
1508 */
1509void create_empty_buffers(struct page *page,
1510                        unsigned long blocksize, unsigned long b_state)
1511{
1512        struct buffer_head *bh, *head, *tail;
1513
1514        head = alloc_page_buffers(page, blocksize, 1);
1515        bh = head;
1516        do {
1517                bh->b_state |= b_state;
1518                tail = bh;
1519                bh = bh->b_this_page;
1520        } while (bh);
1521        tail->b_this_page = head;
1522
1523        spin_lock(&page->mapping->private_lock);
1524        if (PageUptodate(page) || PageDirty(page)) {
1525                bh = head;
1526                do {
1527                        if (PageDirty(page))
1528                                set_buffer_dirty(bh);
1529                        if (PageUptodate(page))
1530                                set_buffer_uptodate(bh);
1531                        bh = bh->b_this_page;
1532                } while (bh != head);
1533        }
1534        attach_page_buffers(page, head);
1535        spin_unlock(&page->mapping->private_lock);
1536}
1537EXPORT_SYMBOL(create_empty_buffers);
1538
1539/*
1540 * We are taking a block for data and we don't want any output from any
1541 * buffer-cache aliases, starting from the return of this function and
1542 * lasting until the moment when something explicitly marks the buffer
1543 * dirty (hopefully that will not happen until we free that block ;-)
1544 * We don't even need to mark it not-uptodate - nobody can expect
1545 * anything from a newly allocated buffer anyway. We used to use
1546 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1547 * don't want to mark the alias unmapped, for example - it would confuse
1548 * anyone who might pick it with bread() afterwards...
1549 *
1550 * Also..  Note that bforget() doesn't lock the buffer.  So there can
1551 * be writeout I/O going on against recently-freed buffers.  We don't
1552 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1553 * only if we really need to.  That happens here.
1554 */
1555void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1556{
1557        struct buffer_head *old_bh;
1558
1559        might_sleep();
1560
1561        old_bh = __find_get_block_slow(bdev, block);
1562        if (old_bh) {
1563                clear_buffer_dirty(old_bh);
1564                wait_on_buffer(old_bh);
1565                clear_buffer_req(old_bh);
1566                __brelse(old_bh);
1567        }
1568}
1569EXPORT_SYMBOL(unmap_underlying_metadata);
1570
1571/*
1572 * Size is a power-of-two in the range 512..PAGE_SIZE,
1573 * and the case we care about most is PAGE_SIZE.
1574 *
1575 * So this *could* possibly be written with those
1576 * constraints in mind (relevant mostly if some
1577 * architecture has a slow bit-scan instruction)
1578 */
1579static inline int block_size_bits(unsigned int blocksize)
1580{
1581        return ilog2(blocksize);
1582}
1583
1584static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1585{
1586        BUG_ON(!PageLocked(page));
1587
1588        if (!page_has_buffers(page))
1589                create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
1590        return page_buffers(page);
1591}
1592
1593/*
1594 * NOTE! All mapped/uptodate combinations are valid:
1595 *
1596 *      Mapped  Uptodate        Meaning
1597 *
1598 *      No      No              "unknown" - must do get_block()
1599 *      No      Yes             "hole" - zero-filled
1600 *      Yes     No              "allocated" - allocated on disk, not read in
1601 *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1602 *
1603 * "Dirty" is valid only with the last case (mapped+uptodate).
1604 */
1605
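/*
 * A purely illustrative, hypothetical helper naming the four combinations
 * from the table above; it is not part of buffer.c.
 */
#include <linux/buffer_head.h>

static const char *example_bh_state_name(struct buffer_head *bh)
{
        if (buffer_mapped(bh))
                return buffer_uptodate(bh) ? "valid" : "allocated";
        return buffer_uptodate(bh) ? "hole" : "unknown";
}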
1606/*
1607 * While block_write_full_page is writing back the dirty buffers under
1608 * the page lock, whoever dirtied the buffers may decide to clean them
1609 * again at any time.  We handle that by only looking at the buffer
1610 * state inside lock_buffer().
1611 *
1612 * If block_write_full_page() is called for regular writeback
1613 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1614 * locked buffer.   This can only happen if someone has written the buffer
1615 * directly, with submit_bh().  At the address_space level PageWriteback
1616 * prevents this contention from occurring.
1617 *
1618 * If block_write_full_page() is called with wbc->sync_mode ==
1619 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1620 * causes the writes to be flagged as synchronous writes.
1621 */
1622static int __block_write_full_page(struct inode *inode, struct page *page,
1623                        get_block_t *get_block, struct writeback_control *wbc,
1624                        bh_end_io_t *handler)
1625{
1626        int err;
1627        sector_t block;
1628        sector_t last_block;
1629        struct buffer_head *bh, *head;
1630        unsigned int blocksize, bbits;
1631        int nr_underway = 0;
1632        int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1633                        WRITE_SYNC : WRITE);
1634
1635        head = create_page_buffers(page, inode,
1636                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
1637
1638        /*
1639         * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1640         * here, and the (potentially unmapped) buffers may become dirty at
1641         * any time.  If a buffer becomes dirty here after we've inspected it
1642         * then we just miss that fact, and the page stays dirty.
1643         *
1644         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1645         * handle that here by just cleaning them.
1646         */
1647
1648        bh = head;
1649        blocksize = bh->b_size;
1650        bbits = block_size_bits(blocksize);
1651
1652        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1653        last_block = (i_size_read(inode) - 1) >> bbits;
1654
1655        /*
1656         * Get all the dirty buffers mapped to disk addresses and
1657         * handle any aliases from the underlying blockdev's mapping.
1658         */
1659        do {
1660                if (block > last_block) {
1661                        /*
1662                         * mapped buffers outside i_size will occur, because
1663                         * this page can be outside i_size when there is a
1664                         * truncate in progress.
1665                         */
1666                        /*
1667                         * The buffer was zeroed by block_write_full_page()
1668                         */
1669                        clear_buffer_dirty(bh);
1670                        set_buffer_uptodate(bh);
1671                } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1672                           buffer_dirty(bh)) {
1673                        WARN_ON(bh->b_size != blocksize);
1674                        err = get_block(inode, block, bh, 1);
1675                        if (err)
1676                                goto recover;
1677                        clear_buffer_delay(bh);
1678                        if (buffer_new(bh)) {
1679                                /* blockdev mappings never come here */
1680                                clear_buffer_new(bh);
1681                                unmap_underlying_metadata(bh->b_bdev,
1682                                                        bh->b_blocknr);
1683                        }
1684                }
1685                bh = bh->b_this_page;
1686                block++;
1687        } while (bh != head);
1688
1689        do {
1690                if (!buffer_mapped(bh))
1691                        continue;
1692                /*
1693                 * If it's a fully non-blocking write attempt and we cannot
1694                 * lock the buffer then redirty the page.  Note that this can
1695                 * potentially cause a busy-wait loop from writeback threads
1696                 * and kswapd activity, but those code paths have their own
1697                 * higher-level throttling.
1698                 */
1699                if (wbc->sync_mode != WB_SYNC_NONE) {
1700                        lock_buffer(bh);
1701                } else if (!trylock_buffer(bh)) {
1702                        redirty_page_for_writepage(wbc, page);
1703                        continue;
1704                }
1705                if (test_clear_buffer_dirty(bh)) {
1706                        mark_buffer_async_write_endio(bh, handler);
1707                } else {
1708                        unlock_buffer(bh);
1709                }
1710        } while ((bh = bh->b_this_page) != head);
1711
1712        /*
1713         * The page and its buffers are protected by PageWriteback(), so we can
1714         * drop the bh refcounts early.
1715         */
1716        BUG_ON(PageWriteback(page));
1717        set_page_writeback(page);
1718
1719        do {
1720                struct buffer_head *next = bh->b_this_page;
1721                if (buffer_async_write(bh)) {
1722                        submit_bh(write_op, bh);
1723                        nr_underway++;
1724                }
1725                bh = next;
1726        } while (bh != head);
1727        unlock_page(page);
1728
1729        err = 0;
1730done:
1731        if (nr_underway == 0) {
1732                /*
1733                 * The page was marked dirty, but the buffers were
1734                 * clean.  Someone wrote them back by hand with
1735                 * ll_rw_block/submit_bh.  A rare case.
1736                 */
1737                end_page_writeback(page);
1738
1739                /*
1740                 * The page and buffer_heads can be released at any time from
1741                 * here on.
1742                 */
1743        }
1744        return err;
1745
1746recover:
1747        /*
1748         * ENOSPC, or some other error.  We may already have added some
1749         * blocks to the file, so we need to write these out to avoid
1750         * exposing stale data.
1751         * The page is currently locked and not marked for writeback
1752         */
1753        bh = head;
1754        /* Recovery: lock and submit the mapped buffers */
1755        do {
1756                if (buffer_mapped(bh) && buffer_dirty(bh) &&
1757                    !buffer_delay(bh)) {
1758                        lock_buffer(bh);
1759                        mark_buffer_async_write_endio(bh, handler);
1760                } else {
1761                        /*
1762                         * The buffer may have been set dirty during
1763                         * attachment to a dirty page.
1764                         */
1765                        clear_buffer_dirty(bh);
1766                }
1767        } while ((bh = bh->b_this_page) != head);
1768        SetPageError(page);
1769        BUG_ON(PageWriteback(page));
1770        mapping_set_error(page->mapping, err);
1771        set_page_writeback(page);
1772        do {
1773                struct buffer_head *next = bh->b_this_page;
1774                if (buffer_async_write(bh)) {
1775                        clear_buffer_dirty(bh);
1776                        submit_bh(write_op, bh);
1777                        nr_underway++;
1778                }
1779                bh = next;
1780        } while (bh != head);
1781        unlock_page(page);
1782        goto done;
1783}
1784
1785/*
1786 * If a page has any new buffers, zero them out here, and mark them uptodate
1787 * and dirty so they'll be written out (in order to prevent uninitialised
1788 * block data from leaking). And clear the new bit.
1789 */
1790void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1791{
1792        unsigned int block_start, block_end;
1793        struct buffer_head *head, *bh;
1794
1795        BUG_ON(!PageLocked(page));
1796        if (!page_has_buffers(page))
1797                return;
1798
1799        bh = head = page_buffers(page);
1800        block_start = 0;
1801        do {
1802                block_end = block_start + bh->b_size;
1803
1804                if (buffer_new(bh)) {
1805                        if (block_end > from && block_start < to) {
1806                                if (!PageUptodate(page)) {
1807                                        unsigned start, size;
1808
1809                                        start = max(from, block_start);
1810                                        size = min(to, block_end) - start;
1811
1812                                        zero_user(page, start, size);
1813                                        set_buffer_uptodate(bh);
1814                                }
1815
1816                                clear_buffer_new(bh);
1817                                mark_buffer_dirty(bh);
1818                        }
1819                }
1820
1821                block_start = block_end;
1822                bh = bh->b_this_page;
1823        } while (bh != head);
1824}
1825EXPORT_SYMBOL(page_zero_new_buffers);
1826
1827int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1828                get_block_t *get_block)
1829{
1830        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1831        unsigned to = from + len;
1832        struct inode *inode = page->mapping->host;
1833        unsigned block_start, block_end;
1834        sector_t block;
1835        int err = 0;
1836        unsigned blocksize, bbits;
1837        struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1838
1839        BUG_ON(!PageLocked(page));
1840        BUG_ON(from > PAGE_CACHE_SIZE);
1841        BUG_ON(to > PAGE_CACHE_SIZE);
1842        BUG_ON(from > to);
1843
1844        head = create_page_buffers(page, inode, 0);
1845        blocksize = head->b_size;
1846        bbits = block_size_bits(blocksize);
1847
1848        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1849
1850        for (bh = head, block_start = 0; bh != head || !block_start;
1851            block++, block_start = block_end, bh = bh->b_this_page) {
1852                block_end = block_start + blocksize;
1853                if (block_end <= from || block_start >= to) {
1854                        if (PageUptodate(page)) {
1855                                if (!buffer_uptodate(bh))
1856                                        set_buffer_uptodate(bh);
1857                        }
1858                        continue;
1859                }
1860                if (buffer_new(bh))
1861                        clear_buffer_new(bh);
1862                if (!buffer_mapped(bh)) {
1863                        WARN_ON(bh->b_size != blocksize);
1864                        err = get_block(inode, block, bh, 1);
1865                        if (err)
1866                                break;
1867                        if (buffer_new(bh)) {
1868                                unmap_underlying_metadata(bh->b_bdev,
1869                                                        bh->b_blocknr);
1870                                if (PageUptodate(page)) {
1871                                        clear_buffer_new(bh);
1872                                        set_buffer_uptodate(bh);
1873                                        mark_buffer_dirty(bh);
1874                                        continue;
1875                                }
1876                                if (block_end > to || block_start < from)
1877                                        zero_user_segments(page,
1878                                                to, block_end,
1879                                                block_start, from);
1880                                continue;
1881                        }
1882                }
1883                if (PageUptodate(page)) {
1884                        if (!buffer_uptodate(bh))
1885                                set_buffer_uptodate(bh);
1886                        continue; 
1887                }
1888                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1889                    !buffer_unwritten(bh) &&
1890                     (block_start < from || block_end > to)) {
1891                        ll_rw_block(READ, 1, &bh);
1892                        *wait_bh++ = bh;
1893                }
1894        }
1895        /*
1896         * If we issued read requests - let them complete.
1897         */
1898        while (wait_bh > wait) {
1899                wait_on_buffer(*--wait_bh);
1900                if (!buffer_uptodate(*wait_bh))
1901                        err = -EIO;
1902        }
1903        if (unlikely(err))
1904                page_zero_new_buffers(page, from, to);
1905        return err;
1906}
1907EXPORT_SYMBOL(__block_write_begin);
1908
1909static int __block_commit_write(struct inode *inode, struct page *page,
1910                unsigned from, unsigned to)
1911{
1912        unsigned block_start, block_end;
1913        int partial = 0;
1914        unsigned blocksize;
1915        struct buffer_head *bh, *head;
1916
1917        bh = head = page_buffers(page);
1918        blocksize = bh->b_size;
1919
1920        block_start = 0;
1921        do {
1922                block_end = block_start + blocksize;
1923                if (block_end <= from || block_start >= to) {
1924                        if (!buffer_uptodate(bh))
1925                                partial = 1;
1926                } else {
1927                        set_buffer_uptodate(bh);
1928                        mark_buffer_dirty(bh);
1929                }
1930                clear_buffer_new(bh);
1931
1932                block_start = block_end;
1933                bh = bh->b_this_page;
1934        } while (bh != head);
1935
1936        /*
1937         * If this is a partial write which happened to make all buffers
1938         * uptodate then we can optimize away a bogus readpage() for
1939         * the next read(). Here we 'discover' whether the page went
1940         * uptodate as a result of this (potentially partial) write.
1941         */
1942        if (!partial)
1943                SetPageUptodate(page);
1944        return 0;
1945}
1946
1947/*
1948 * block_write_begin takes care of the basic task of block allocation and
1949 * bringing partial write blocks uptodate first.
1950 *
1951 * The filesystem needs to handle block truncation upon failure.
1952 */
1953int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1954                unsigned flags, struct page **pagep, get_block_t *get_block)
1955{
1956        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1957        struct page *page;
1958        int status;
1959
1960        page = grab_cache_page_write_begin(mapping, index, flags);
1961        if (!page)
1962                return -ENOMEM;
1963
1964        status = __block_write_begin(page, pos, len, get_block);
1965        if (unlikely(status)) {
1966                unlock_page(page);
1967                page_cache_release(page);
1968                page = NULL;
1969        }
1970
1971        *pagep = page;
1972        return status;
1973}
1974EXPORT_SYMBOL(block_write_begin);
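/*
 * A hypothetical ->write_begin built on block_write_begin(), illustrating
 * the comment above: on failure the filesystem itself must clean up any
 * blocks instantiated beyond i_size.  example_get_block and
 * example_truncate_blocks stand in for the filesystem's own block-mapping
 * and block-freeing routines; the matching ->write_end is typically just
 * generic_write_end() below.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);
void example_truncate_blocks(struct inode *inode, loff_t from);

static int example_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                example_get_block);
        if (ret < 0 && pos + len > mapping->host->i_size)
                example_truncate_blocks(mapping->host, mapping->host->i_size);
        return ret;
}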
1975
1976int block_write_end(struct file *file, struct address_space *mapping,
1977                        loff_t pos, unsigned len, unsigned copied,
1978                        struct page *page, void *fsdata)
1979{
1980        struct inode *inode = mapping->host;
1981        unsigned start;
1982
1983        start = pos & (PAGE_CACHE_SIZE - 1);
1984
1985        if (unlikely(copied < len)) {
1986                /*
1987                 * The buffers that were written will now be uptodate, so we
1988                 * don't have to worry about a readpage reading them and
1989                 * overwriting a partial write. However if we have encountered
1990                 * a short write and only partially written into a buffer, it
1991                 * will not be marked uptodate, so a readpage might come in and
1992                 * destroy our partial write.
1993                 *
1994                 * Do the simplest thing, and just treat any short write to a
1995                 * non uptodate page as a zero-length write, and force the
1996                 * caller to redo the whole thing.
1997                 */
1998                if (!PageUptodate(page))
1999                        copied = 0;
2000
2001                page_zero_new_buffers(page, start+copied, start+len);
2002        }
2003        flush_dcache_page(page);
2004
2005        /* This could be a short (even 0-length) commit */
2006        __block_commit_write(inode, page, start, start+copied);
2007
2008        return copied;
2009}
2010EXPORT_SYMBOL(block_write_end);
2011
2012int generic_write_end(struct file *file, struct address_space *mapping,
2013                        loff_t pos, unsigned len, unsigned copied,
2014                        struct page *page, void *fsdata)
2015{
2016        struct inode *inode = mapping->host;
2017        int i_size_changed = 0;
2018
2019        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2020
2021        /*
2022         * No need to use i_size_read() here, the i_size
2023         * cannot change under us because we hold i_mutex.
2024         *
2025         * But it's important to update i_size while still holding page lock:
2026         * page writeout could otherwise come in and zero beyond i_size.
2027         */
2028        if (pos+copied > inode->i_size) {
2029                i_size_write(inode, pos+copied);
2030                i_size_changed = 1;
2031        }
2032
2033        unlock_page(page);
2034        page_cache_release(page);
2035
2036        /*
2037         * Don't mark the inode dirty under page lock. First, it unnecessarily
2038         * makes the holding time of page lock longer. Second, it forces lock
2039         * ordering of page lock and transaction start for journaling
2040         * filesystems.
2041         */
2042        if (i_size_changed)
2043                mark_inode_dirty(inode);
2044
2045        return copied;
2046}
2047EXPORT_SYMBOL(generic_write_end);
2048
2049/*
2050 * block_is_partially_uptodate checks whether buffers within a page are
2051 * uptodate or not.
2052 *
2053 * Returns true if all buffers which correspond to a file portion
2054 * we want to read are uptodate.
2055 */
2056int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2057                                        unsigned long from)
2058{
2059        unsigned block_start, block_end, blocksize;
2060        unsigned to;
2061        struct buffer_head *bh, *head;
2062        int ret = 1;
2063
2064        if (!page_has_buffers(page))
2065                return 0;
2066
2067        head = page_buffers(page);
2068        blocksize = head->b_size;
2069        to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2070        to = from + to;
2071        if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2072                return 0;
2073
2074        bh = head;
2075        block_start = 0;
2076        do {
2077                block_end = block_start + blocksize;
2078                if (block_end > from && block_start < to) {
2079                        if (!buffer_uptodate(bh)) {
2080                                ret = 0;
2081                                break;
2082                        }
2083                        if (block_end >= to)
2084                                break;
2085                }
2086                block_start = block_end;
2087                bh = bh->b_this_page;
2088        } while (bh != head);
2089
2090        return ret;
2091}
2092EXPORT_SYMBOL(block_is_partially_uptodate);
2093
2094/*
2095 * Generic "read page" function for block devices that have the normal
2096 * get_block functionality. This covers most block device filesystems.
2097 * Reads the page asynchronously --- the unlock_buffer() and
2098 * set/clear_buffer_uptodate() functions propagate buffer state into the
2099 * page struct once IO has completed.
2100 */
2101int block_read_full_page(struct page *page, get_block_t *get_block)
2102{
2103        struct inode *inode = page->mapping->host;
2104        sector_t iblock, lblock;
2105        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2106        unsigned int blocksize, bbits;
2107        int nr, i;
2108        int fully_mapped = 1;
2109
2110        head = create_page_buffers(page, inode, 0);
2111        blocksize = head->b_size;
2112        bbits = block_size_bits(blocksize);
2113
2114        iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
2115        lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2116        bh = head;
2117        nr = 0;
2118        i = 0;
2119
2120        do {
2121                if (buffer_uptodate(bh))
2122                        continue;
2123
2124                if (!buffer_mapped(bh)) {
2125                        int err = 0;
2126
2127                        fully_mapped = 0;
2128                        if (iblock < lblock) {
2129                                WARN_ON(bh->b_size != blocksize);
2130                                err = get_block(inode, iblock, bh, 0);
2131                                if (err)
2132                                        SetPageError(page);
2133                        }
2134                        if (!buffer_mapped(bh)) {
2135                                zero_user(page, i * blocksize, blocksize);
2136                                if (!err)
2137                                        set_buffer_uptodate(bh);
2138                                continue;
2139                        }
2140                        /*
2141                         * get_block() might have updated the buffer
2142                         * synchronously
2143                         */
2144                        if (buffer_uptodate(bh))
2145                                continue;
2146                }
2147                arr[nr++] = bh;
2148        } while (i++, iblock++, (bh = bh->b_this_page) != head);
2149
2150        if (fully_mapped)
2151                SetPageMappedToDisk(page);
2152
2153        if (!nr) {
2154                /*
2155                 * All buffers are uptodate - we can set the page uptodate
2156                 * as well. But not if get_block() returned an error.
2157                 */
2158                if (!PageError(page))
2159                        SetPageUptodate(page);
2160                unlock_page(page);
2161                return 0;
2162        }
2163
2164        /* Stage two: lock the buffers */
2165        for (i = 0; i < nr; i++) {
2166                bh = arr[i];
2167                lock_buffer(bh);
2168                mark_buffer_async_read(bh);
2169        }
2170
2171        /*
2172         * Stage 3: start the IO.  Check for uptodateness
2173         * inside the buffer lock in case another process reading
2174         * the underlying blockdev brought it uptodate (the sct fix).
2175         */
2176        for (i = 0; i < nr; i++) {
2177                bh = arr[i];
2178                if (buffer_uptodate(bh))
2179                        end_buffer_async_read(bh, 1);
2180                else
2181                        submit_bh(READ, bh);
2182        }
2183        return 0;
2184}
2185EXPORT_SYMBOL(block_read_full_page);
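/*
 * A hypothetical sketch: the helper above is normally exposed as ->readpage
 * through a one-line wrapper supplying the filesystem's get_block routine
 * (example_get_block is assumed).
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, example_get_block);
}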
2186
2187/* utility function for filesystems that need to do work on expanding
2188 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2189 * deal with the hole.  
2190 */
2191int generic_cont_expand_simple(struct inode *inode, loff_t size)
2192{
2193        struct address_space *mapping = inode->i_mapping;
2194        struct page *page;
2195        void *fsdata;
2196        int err;
2197
2198        err = inode_newsize_ok(inode, size);
2199        if (err)
2200                goto out;
2201
2202        err = pagecache_write_begin(NULL, mapping, size, 0,
2203                                AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2204                                &page, &fsdata);
2205        if (err)
2206                goto out;
2207
2208        err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2209        BUG_ON(err > 0);
2210
2211out:
2212        return err;
2213}
2214EXPORT_SYMBOL(generic_cont_expand_simple);
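/*
 * A hypothetical sketch of the usual caller: a filesystem that cannot
 * represent holes grows the file through the page cache from its ->setattr
 * path when the requested size is larger than i_size
 * ("example_setattr_expand" is an assumed name).
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_setattr_expand(struct inode *inode, loff_t newsize)
{
        if (newsize <= inode->i_size)
                return 0;
        return generic_cont_expand_simple(inode, newsize);
}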
2215
2216static int cont_expand_zero(struct file *file, struct address_space *mapping,
2217                            loff_t pos, loff_t *bytes)
2218{
2219        struct inode *inode = mapping->host;
2220        unsigned blocksize = 1 << inode->i_blkbits;
2221        struct page *page;
2222        void *fsdata;
2223        pgoff_t index, curidx;
2224        loff_t curpos;
2225        unsigned zerofrom, offset, len;
2226        int err = 0;
2227
2228        index = pos >> PAGE_CACHE_SHIFT;
2229        offset = pos & ~PAGE_CACHE_MASK;
2230
2231        while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2232                zerofrom = curpos & ~PAGE_CACHE_MASK;
2233                if (zerofrom & (blocksize-1)) {
2234                        *bytes |= (blocksize-1);
2235                        (*bytes)++;
2236                }
2237                len = PAGE_CACHE_SIZE - zerofrom;
2238
2239                err = pagecache_write_begin(file, mapping, curpos, len,
2240                                                AOP_FLAG_UNINTERRUPTIBLE,
2241                                                &page, &fsdata);
2242                if (err)
2243                        goto out;
2244                zero_user(page, zerofrom, len);
2245                err = pagecache_write_end(file, mapping, curpos, len, len,
2246                                                page, fsdata);
2247                if (err < 0)
2248                        goto out;
2249                BUG_ON(err != len);
2250                err = 0;
2251
2252                balance_dirty_pages_ratelimited(mapping);
2253        }
2254
2255        /* page covers the boundary, find the boundary offset */
2256        if (index == curidx) {
2257                zerofrom = curpos & ~PAGE_CACHE_MASK;
2258                /* if we are expanding the file, the last block will be filled */
2259                if (offset <= zerofrom) {
2260                        goto out;
2261                }
2262                if (zerofrom & (blocksize-1)) {
2263                        *bytes |= (blocksize-1);
2264                        (*bytes)++;
2265                }
2266                len = offset - zerofrom;
2267
2268                err = pagecache_write_begin(file, mapping, curpos, len,
2269                                                AOP_FLAG_UNINTERRUPTIBLE,
2270                                                &page, &fsdata);
2271                if (err)
2272                        goto out;
2273                zero_user(page, zerofrom, len);
2274                err = pagecache_write_end(file, mapping, curpos, len, len,
2275                                                page, fsdata);
2276                if (err < 0)
2277                        goto out;
2278                BUG_ON(err != len);
2279                err = 0;
2280        }
2281out:
2282        return err;
2283}
2284
2285/*
2286 * For moronic filesystems that do not allow holes in files.
2287 * We may have to extend the file.
2288 */
2289int cont_write_begin(struct file *file, struct address_space *mapping,
2290                        loff_t pos, unsigned len, unsigned flags,
2291                        struct page **pagep, void **fsdata,
2292                        get_block_t *get_block, loff_t *bytes)
2293{
2294        struct inode *inode = mapping->host;
2295        unsigned blocksize = 1 << inode->i_blkbits;
2296        unsigned zerofrom;
2297        int err;
2298
2299        err = cont_expand_zero(file, mapping, pos, bytes);
2300        if (err)
2301                return err;
2302
2303        zerofrom = *bytes & ~PAGE_CACHE_MASK;
2304        if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2305                *bytes |= (blocksize-1);
2306                (*bytes)++;
2307        }
2308
2309        return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2310}
2311EXPORT_SYMBOL(cont_write_begin);
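/*
 * A hypothetical sketch of wiring cont_write_begin() up as ->write_begin in
 * a hole-less filesystem.  The per-inode "mmu_private" field playing the
 * role of *bytes, the EXAMPLE_I() helper and example_get_block are all
 * assumptions modelled on how such filesystems commonly track the size that
 * has been zeroed out so far.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

struct example_inode_info {
        loff_t          mmu_private;    /* bytes initialised so far */
        struct inode    vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
        return container_of(inode, struct example_inode_info, vfs_inode);
}

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);

static int example_cont_write_begin(struct file *file,
                        struct address_space *mapping, loff_t pos,
                        unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                example_get_block,
                                &EXAMPLE_I(mapping->host)->mmu_private);
}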
2312
2313int block_commit_write(struct page *page, unsigned from, unsigned to)
2314{
2315        struct inode *inode = page->mapping->host;
2316        __block_commit_write(inode,page,from,to);
2317        return 0;
2318}
2319EXPORT_SYMBOL(block_commit_write);
2320
2321/*
2322 * block_page_mkwrite() is not allowed to change the file size as it gets
2323 * called from a page fault handler when a page is first dirtied. Hence we must
2324 * be careful to check for EOF conditions here. We set the page up correctly
2325 * for a written page which means we get ENOSPC checking when writing into
2326 * holes and correct delalloc and unwritten extent mapping on filesystems that
2327 * support these features.
2328 *
2329 * We are not allowed to take the i_mutex here so we have to play games to
2330 * protect against truncate races as the page could now be beyond EOF.  Because
2331 * truncate writes the inode size before removing pages, once we have the
2332 * page lock we can determine safely if the page is beyond EOF. If it is not
2333 * beyond EOF, then the page is guaranteed safe against truncation until we
2334 * unlock the page.
2335 *
2336 * Direct callers of this function should protect against filesystem freezing
2337 * using sb_start_write() - sb_end_write() functions.
2338 */
2339int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2340                         get_block_t get_block)
2341{
2342        struct page *page = vmf->page;
2343        struct inode *inode = file_inode(vma->vm_file);
2344        unsigned long end;
2345        loff_t size;
2346        int ret;
2347
2348        lock_page(page);
2349        size = i_size_read(inode);
2350        if ((page->mapping != inode->i_mapping) ||
2351            (page_offset(page) > size)) {
2352                /* We overload EFAULT to mean page got truncated */
2353                ret = -EFAULT;
2354                goto out_unlock;
2355        }
2356
2357        /* page is wholly or partially inside EOF */
2358        if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2359                end = size & ~PAGE_CACHE_MASK;
2360        else
2361                end = PAGE_CACHE_SIZE;
2362
2363        ret = __block_write_begin(page, 0, end, get_block);
2364        if (!ret)
2365                ret = block_commit_write(page, 0, end);
2366
2367        if (unlikely(ret < 0))
2368                goto out_unlock;
2369        set_page_dirty(page);
2370        wait_for_stable_page(page);
2371        return 0;
2372out_unlock:
2373        unlock_page(page);
2374        return ret;
2375}
2376EXPORT_SYMBOL(__block_page_mkwrite);
2377
2378int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2379                   get_block_t get_block)
2380{
2381        int ret;
2382        struct super_block *sb = file_inode(vma->vm_file)->i_sb;
2383
2384        sb_start_pagefault(sb);
2385
2386        /*
2387         * Update file times before taking page lock. We may end up failing the
2388         * fault so this update may be superfluous but who really cares...
2389         */
2390        file_update_time(vma->vm_file);
2391
2392        ret = __block_page_mkwrite(vma, vmf, get_block);
2393        sb_end_pagefault(sb);
2394        return block_page_mkwrite_return(ret);
2395}
2396EXPORT_SYMBOL(block_page_mkwrite);
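/*
 * A hypothetical sketch of the usual wiring: a filesystem's ->page_mkwrite
 * handler is a thin wrapper passing its get_block routine to
 * block_page_mkwrite(), sitting next to filemap_fault in its
 * vm_operations_struct.  The example_* names are assumptions.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);

static int example_page_mkwrite(struct vm_area_struct *vma,
                        struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, example_get_block);
}

static const struct vm_operations_struct example_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = example_page_mkwrite,
};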
2397
2398/*
2399 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2400 * immediately, while under the page lock.  So it needs a special end_io
2401 * handler which does not touch the bh after unlocking it.
2402 */
2403static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2404{
2405        __end_buffer_read_notouch(bh, uptodate);
2406}
2407
2408/*
2409 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2410 * the page (converting it to circular linked list and taking care of page
2411 * dirty races).
2412 */
2413static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2414{
2415        struct buffer_head *bh;
2416
2417        BUG_ON(!PageLocked(page));
2418
2419        spin_lock(&page->mapping->private_lock);
2420        bh = head;
2421        do {
2422                if (PageDirty(page))
2423                        set_buffer_dirty(bh);
2424                if (!bh->b_this_page)
2425                        bh->b_this_page = head;
2426                bh = bh->b_this_page;
2427        } while (bh != head);
2428        attach_page_buffers(page, head);
2429        spin_unlock(&page->mapping->private_lock);
2430}
2431
2432/*
2433 * On entry, the page is not uptodate at all.
2434 * On exit, the page is fully uptodate in the areas outside (from,to).
2435 * The filesystem needs to handle block truncation upon failure.
2436 */
2437int nobh_write_begin(struct address_space *mapping,
2438                        loff_t pos, unsigned len, unsigned flags,
2439                        struct page **pagep, void **fsdata,
2440                        get_block_t *get_block)
2441{
2442        struct inode *inode = mapping->host;
2443        const unsigned blkbits = inode->i_blkbits;
2444        const unsigned blocksize = 1 << blkbits;
2445        struct buffer_head *head, *bh;
2446        struct page *page;
2447        pgoff_t index;
2448        unsigned from, to;
2449        unsigned block_in_page;
2450        unsigned block_start, block_end;
2451        sector_t block_in_file;
2452        int nr_reads = 0;
2453        int ret = 0;
2454        int is_mapped_to_disk = 1;
2455
2456        index = pos >> PAGE_CACHE_SHIFT;
2457        from = pos & (PAGE_CACHE_SIZE - 1);
2458        to = from + len;
2459
2460        page = grab_cache_page_write_begin(mapping, index, flags);
2461        if (!page)
2462                return -ENOMEM;
2463        *pagep = page;
2464        *fsdata = NULL;
2465
2466        if (page_has_buffers(page)) {
2467                ret = __block_write_begin(page, pos, len, get_block);
2468                if (unlikely(ret))
2469                        goto out_release;
2470                return ret;
2471        }
2472
2473        if (PageMappedToDisk(page))
2474                return 0;
2475
2476        /*
2477         * Allocate buffers so that we can keep track of state, and potentially
2478         * attach them to the page if an error occurs. In the common case of
2479         * no error, they will just be freed again without ever being attached
2480         * to the page (which is all OK, because we're under the page lock).
2481         *
2482         * Be careful: the buffer linked list is a NULL terminated one, rather
2483         * than the circular one we're used to.
2484         */
2485        head = alloc_page_buffers(page, blocksize, 0);
2486        if (!head) {
2487                ret = -ENOMEM;
2488                goto out_release;
2489        }
2490
2491        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2492
2493        /*
2494         * We loop across all blocks in the page, whether or not they are
2495         * part of the affected region.  This is so we can discover if the
2496         * page is fully mapped-to-disk.
2497         */
2498        for (block_start = 0, block_in_page = 0, bh = head;
2499                  block_start < PAGE_CACHE_SIZE;
2500                  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2501                int create;
2502
2503                block_end = block_start + blocksize;
2504                bh->b_state = 0;
2505                create = 1;
2506                if (block_start >= to)
2507                        create = 0;
2508                ret = get_block(inode, block_in_file + block_in_page,
2509                                        bh, create);
2510                if (ret)
2511                        goto failed;
2512                if (!buffer_mapped(bh))
2513                        is_mapped_to_disk = 0;
2514                if (buffer_new(bh))
2515                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2516                if (PageUptodate(page)) {
2517                        set_buffer_uptodate(bh);
2518                        continue;
2519                }
2520                if (buffer_new(bh) || !buffer_mapped(bh)) {
2521                        zero_user_segments(page, block_start, from,
2522                                                        to, block_end);
2523                        continue;
2524                }
2525                if (buffer_uptodate(bh))
2526                        continue;       /* reiserfs does this */
2527                if (block_start < from || block_end > to) {
2528                        lock_buffer(bh);
2529                        bh->b_end_io = end_buffer_read_nobh;
2530                        submit_bh(READ, bh);
2531                        nr_reads++;
2532                }
2533        }
2534
2535        if (nr_reads) {
2536                /*
2537                 * The page is locked, so these buffers are protected from
2538                 * any VM or truncate activity.  Hence we don't need to care
2539                 * for the buffer_head refcounts.
2540                 */
2541                for (bh = head; bh; bh = bh->b_this_page) {
2542                        wait_on_buffer(bh);
2543                        if (!buffer_uptodate(bh))
2544                                ret = -EIO;
2545                }
2546                if (ret)
2547                        goto failed;
2548        }
2549
2550        if (is_mapped_to_disk)
2551                SetPageMappedToDisk(page);
2552
2553        *fsdata = head; /* to be released by nobh_write_end */
2554
2555        return 0;
2556
2557failed:
2558        BUG_ON(!ret);
2559        /*
2560         * Error recovery is a bit difficult. We need to zero out blocks that
2561         * were newly allocated, and dirty them to ensure they get written out.
2562         * Buffers need to be attached to the page at this point, otherwise
2563         * the handling of potential IO errors during writeout would be hard
2564         * (could try doing synchronous writeout, but what if that fails too?)
2565         */
2566        attach_nobh_buffers(page, head);
2567        page_zero_new_buffers(page, from, to);
2568
2569out_release:
2570        unlock_page(page);
2571        page_cache_release(page);
2572        *pagep = NULL;
2573
2574        return ret;
2575}
2576EXPORT_SYMBOL(nobh_write_begin);
2577
2578int nobh_write_end(struct file *file, struct address_space *mapping,
2579                        loff_t pos, unsigned len, unsigned copied,
2580                        struct page *page, void *fsdata)
2581{
2582        struct inode *inode = page->mapping->host;
2583        struct buffer_head *head = fsdata;
2584        struct buffer_head *bh;
2585        BUG_ON(fsdata != NULL && page_has_buffers(page));
2586
2587        if (unlikely(copied < len) && head)
2588                attach_nobh_buffers(page, head);
2589        if (page_has_buffers(page))
2590                return generic_write_end(file, mapping, pos, len,
2591                                        copied, page, fsdata);
2592
2593        SetPageUptodate(page);
2594        set_page_dirty(page);
2595        if (pos+copied > inode->i_size) {
2596                i_size_write(inode, pos+copied);
2597                mark_inode_dirty(inode);
2598        }
2599
2600        unlock_page(page);
2601        page_cache_release(page);
2602
2603        while (head) {
2604                bh = head;
2605                head = head->b_this_page;
2606                free_buffer_head(bh);
2607        }
2608
2609        return copied;
2610}
2611EXPORT_SYMBOL(nobh_write_end);
2612
2613/*
2614 * nobh_writepage() - based on block_write_full_page() except
2615 * that it tries to operate without attaching bufferheads to
2616 * the page.
2617 */
2618int nobh_writepage(struct page *page, get_block_t *get_block,
2619                        struct writeback_control *wbc)
2620{
2621        struct inode * const inode = page->mapping->host;
2622        loff_t i_size = i_size_read(inode);
2623        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2624        unsigned offset;
2625        int ret;
2626
2627        /* Is the page fully inside i_size? */
2628        if (page->index < end_index)
2629                goto out;
2630
2631        /* Is the page fully outside i_size? (truncate in progress) */
2632        offset = i_size & (PAGE_CACHE_SIZE-1);
2633        if (page->index >= end_index+1 || !offset) {
2634                /*
2635                 * The page may have dirty, unmapped buffers.  For example,
2636                 * they may have been added in ext3_writepage().  Make them
2637                 * freeable here, so the page does not leak.
2638                 */
2639#if 0
2640                /* Not really sure about this  - do we need this ? */
2641                if (page->mapping->a_ops->invalidatepage)
2642                        page->mapping->a_ops->invalidatepage(page, offset);
2643#endif
2644                unlock_page(page);
2645                return 0; /* don't care */
2646        }
2647
2648        /*
2649         * The page straddles i_size.  It must be zeroed out on each and every
2650         * writepage invocation because it may be mmapped.  "A file is mapped
2651         * in multiples of the page size.  For a file that is not a multiple of
2652         * the  page size, the remaining memory is zeroed when mapped, and
2653         * writes to that region are not written out to the file."
2654         */
2655        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2656out:
2657        ret = mpage_writepage(page, get_block, wbc);
2658        if (ret == -EAGAIN)
2659                ret = __block_write_full_page(inode, page, get_block, wbc,
2660                                              end_buffer_async_write);
2661        return ret;
2662}
2663EXPORT_SYMBOL(nobh_writepage);
2664
2665int nobh_truncate_page(struct address_space *mapping,
2666                        loff_t from, get_block_t *get_block)
2667{
2668        pgoff_t index = from >> PAGE_CACHE_SHIFT;
2669        unsigned offset = from & (PAGE_CACHE_SIZE-1);
2670        unsigned blocksize;
2671        sector_t iblock;
2672        unsigned length, pos;
2673        struct inode *inode = mapping->host;
2674        struct page *page;
2675        struct buffer_head map_bh;
2676        int err;
2677
2678        blocksize = 1 << inode->i_blkbits;
2679        length = offset & (blocksize - 1);
2680
2681        /* Block boundary? Nothing to do */
2682        if (!length)
2683                return 0;
2684
2685        length = blocksize - length;
2686        iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2687
2688        page = grab_cache_page(mapping, index);
2689        err = -ENOMEM;
2690        if (!page)
2691                goto out;
2692
2693        if (page_has_buffers(page)) {
2694has_buffers:
2695                unlock_page(page);
2696                page_cache_release(page);
2697                return block_truncate_page(mapping, from, get_block);
2698        }
2699
2700        /* Find the buffer that contains "offset" */
2701        pos = blocksize;
2702        while (offset >= pos) {
2703                iblock++;
2704                pos += blocksize;
2705        }
2706
2707        map_bh.b_size = blocksize;
2708        map_bh.b_state = 0;
2709        err = get_block(inode, iblock, &map_bh, 0);
2710        if (err)
2711                goto unlock;
2712        /* unmapped? It's a hole - nothing to do */
2713        if (!buffer_mapped(&map_bh))
2714                goto unlock;
2715
2716        /* Ok, it's mapped. Make sure it's up-to-date */
2717        if (!PageUptodate(page)) {
2718                err = mapping->a_ops->readpage(NULL, page);
2719                if (err) {
2720                        page_cache_release(page);
2721                        goto out;
2722                }
2723                lock_page(page);
2724                if (!PageUptodate(page)) {
2725                        err = -EIO;
2726                        goto unlock;
2727                }
2728                if (page_has_buffers(page))
2729                        goto has_buffers;
2730        }
2731        zero_user(page, offset, length);
2732        set_page_dirty(page);
2733        err = 0;
2734
2735unlock:
2736        unlock_page(page);
2737        page_cache_release(page);
2738out:
2739        return err;
2740}
2741EXPORT_SYMBOL(nobh_truncate_page);
2742
2743int block_truncate_page(struct address_space *mapping,
2744                        loff_t from, get_block_t *get_block)
2745{
2746        pgoff_t index = from >> PAGE_CACHE_SHIFT;
2747        unsigned offset = from & (PAGE_CACHE_SIZE-1);
2748        unsigned blocksize;
2749        sector_t iblock;
2750        unsigned length, pos;
2751        struct inode *inode = mapping->host;
2752        struct page *page;
2753        struct buffer_head *bh;
2754        int err;
2755
2756        blocksize = 1 << inode->i_blkbits;
2757        length = offset & (blocksize - 1);
2758
2759        /* Block boundary? Nothing to do */
2760        if (!length)
2761                return 0;
2762
2763        length = blocksize - length;
2764        iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2765        
2766        page = grab_cache_page(mapping, index);
2767        err = -ENOMEM;
2768        if (!page)
2769                goto out;
2770
2771        if (!page_has_buffers(page))
2772                create_empty_buffers(page, blocksize, 0);
2773
2774        /* Find the buffer that contains "offset" */
2775        bh = page_buffers(page);
2776        pos = blocksize;
2777        while (offset >= pos) {
2778                bh = bh->b_this_page;
2779                iblock++;
2780                pos += blocksize;
2781        }
2782
2783        err = 0;
2784        if (!buffer_mapped(bh)) {
2785                WARN_ON(bh->b_size != blocksize);
2786                err = get_block(inode, iblock, bh, 0);
2787                if (err)
2788                        goto unlock;
2789                /* unmapped? It's a hole - nothing to do */
2790                if (!buffer_mapped(bh))
2791                        goto unlock;
2792        }
2793
2794        /* Ok, it's mapped. Make sure it's up-to-date */
2795        if (PageUptodate(page))
2796                set_buffer_uptodate(bh);
2797
2798        if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2799                err = -EIO;
2800                ll_rw_block(READ, 1, &bh);
2801                wait_on_buffer(bh);
2802                /* Uhhuh. Read error. Complain and punt. */
2803                if (!buffer_uptodate(bh))
2804                        goto unlock;
2805        }
2806
2807        zero_user(page, offset, length);
2808        mark_buffer_dirty(bh);
2809        err = 0;
2810
2811unlock:
2812        unlock_page(page);
2813        page_cache_release(page);
2814out:
2815        return err;
2816}
2817EXPORT_SYMBOL(block_truncate_page);
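/*
 * A hypothetical sketch of the usual caller: when shrinking a file, zero the
 * tail of the new last partial block with block_truncate_page() before
 * freeing the on-disk blocks, so stale data is not exposed if the file is
 * later extended.  example_get_block and example_free_blocks are assumed
 * filesystem routines.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);
void example_free_blocks(struct inode *inode, loff_t from);

static int example_shrink(struct inode *inode, loff_t newsize)
{
        int err;

        err = block_truncate_page(inode->i_mapping, newsize,
                                  example_get_block);
        if (err)
                return err;
        truncate_setsize(inode, newsize);
        example_free_blocks(inode, newsize);
        return 0;
}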
2818
2819/*
2820 * The generic ->writepage function for buffer-backed address_spaces
2821 * this form passes in the end_io handler used to finish the IO.
2822 */
2823int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2824                        struct writeback_control *wbc, bh_end_io_t *handler)
2825{
2826        struct inode * const inode = page->mapping->host;
2827        loff_t i_size = i_size_read(inode);
2828        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2829        unsigned offset;
2830
2831        /* Is the page fully inside i_size? */
2832        if (page->index < end_index)
2833                return __block_write_full_page(inode, page, get_block, wbc,
2834                                               handler);
2835
2836        /* Is the page fully outside i_size? (truncate in progress) */
2837        offset = i_size & (PAGE_CACHE_SIZE-1);
2838        if (page->index >= end_index+1 || !offset) {
2839                /*
2840                 * The page may have dirty, unmapped buffers.  For example,
2841                 * they may have been added in ext3_writepage().  Make them
2842                 * freeable here, so the page does not leak.
2843                 */
2844                do_invalidatepage(page, 0);
2845                unlock_page(page);
2846                return 0; /* don't care */
2847        }
2848
2849        /*
2850         * The page straddles i_size.  It must be zeroed out on each and every
2851         * writepage invocation because it may be mmapped.  "A file is mapped
2852         * in multiples of the page size.  For a file that is not a multiple of
2853         * the  page size, the remaining memory is zeroed when mapped, and
2854         * writes to that region are not written out to the file."
2855         */
2856        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2857        return __block_write_full_page(inode, page, get_block, wbc, handler);
2858}
2859EXPORT_SYMBOL(block_write_full_page_endio);
2860
2861/*
2862 * The generic ->writepage function for buffer-backed address_spaces
2863 */
2864int block_write_full_page(struct page *page, get_block_t *get_block,
2865                        struct writeback_control *wbc)
2866{
2867        return block_write_full_page_endio(page, get_block, wbc,
2868                                           end_buffer_async_write);
2869}
2870EXPORT_SYMBOL(block_write_full_page);
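/*
 * A hypothetical sketch: as with ->readpage earlier, filesystems normally
 * expose this helper as ->writepage through a one-line wrapper supplying
 * their get_block routine (example_get_block is assumed).
 */
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, example_get_block, wbc);
}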
2871
2872sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2873                            get_block_t *get_block)
2874{
2875        struct buffer_head tmp;
2876        struct inode *inode = mapping->host;
2877        tmp.b_state = 0;
2878        tmp.b_blocknr = 0;
2879        tmp.b_size = 1 << inode->i_blkbits;
2880        get_block(inode, block, &tmp, 0);
2881        return tmp.b_blocknr;
2882}
2883EXPORT_SYMBOL(generic_block_bmap);
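/*
 * A hypothetical sketch: ->bmap follows the same one-line wrapper pattern,
 * and the FIBMAP ioctl reaches generic_block_bmap() this way
 * (example_get_block is assumed).
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);

static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, example_get_block);
}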
2884
2885static void end_bio_bh_io_sync(struct bio *bio, int err)
2886{
2887        struct buffer_head *bh = bio->bi_private;
2888
2889        if (err == -EOPNOTSUPP) {
2890                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2891        }
2892
2893        if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2894                set_bit(BH_Quiet, &bh->b_state);
2895
2896        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2897        bio_put(bio);
2898}
2899
2900/*
2901 * This allows us to do IO even on the odd last sectors
2902 * of a device, even if the bh block size is some multiple
2903 * of the physical sector size.
2904 *
2905 * We'll just truncate the bio to the size of the device,
2906 * and clear the end of the buffer head manually.
2907 *
2908 * Truly out-of-range accesses will turn into actual IO
2909 * errors; this only handles the "we need to be able to
2910 * do IO at the final sector" case.
2911 */
2912static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
2913{
2914        sector_t maxsector;
2915        unsigned bytes;
2916
2917        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
2918        if (!maxsector)
2919                return;
2920
2921        /*
2922         * If the *whole* IO is past the end of the device,
2923         * let it through, and the IO layer will turn it into
2924         * an EIO.
2925         */
2926        if (unlikely(bio->bi_sector >= maxsector))
2927                return;
2928
2929        maxsector -= bio->bi_sector;
2930        bytes = bio->bi_size;
2931        if (likely((bytes >> 9) <= maxsector))
2932                return;
2933
2934        /* Uhhuh. We've got a bh that straddles the device size! */
2935        bytes = maxsector << 9;
2936
2937        /* Truncate the bio.. */
2938        bio->bi_size = bytes;
2939        bio->bi_io_vec[0].bv_len = bytes;
2940
2941        /* ..and clear the end of the buffer for reads */
2942        if ((rw & RW_MASK) == READ) {
2943                void *kaddr = kmap_atomic(bh->b_page);
2944                memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
2945                kunmap_atomic(kaddr);
2946                flush_dcache_page(bh->b_page);
2947        }
2948}
2949
2950int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
2951{
2952        struct bio *bio;
2953        int ret = 0;
2954
2955        BUG_ON(!buffer_locked(bh));
2956        BUG_ON(!buffer_mapped(bh));
2957        BUG_ON(!bh->b_end_io);
2958        BUG_ON(buffer_delay(bh));
2959        BUG_ON(buffer_unwritten(bh));
2960
2961        /*
2962         * Only clear out a write error when rewriting
2963         */
2964        if (test_set_buffer_req(bh) && (rw & WRITE))
2965                clear_buffer_write_io_error(bh);
2966
2967        /*
2968         * from here on down, it's all bio -- do the initial mapping,
2969         * submit_bio -> generic_make_request may further map this bio around
2970         */
2971        bio = bio_alloc(GFP_NOIO, 1);
2972
2973        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2974        bio->bi_bdev = bh->b_bdev;
2975        bio->bi_io_vec[0].bv_page = bh->b_page;
2976        bio->bi_io_vec[0].bv_len = bh->b_size;
2977        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2978
2979        bio->bi_vcnt = 1;
2980        bio->bi_size = bh->b_size;
2981
2982        bio->bi_end_io = end_bio_bh_io_sync;
2983        bio->bi_private = bh;
2984        bio->bi_flags |= bio_flags;
2985
2986        /* Take care of bh's that straddle the end of the device */
2987        guard_bh_eod(rw, bio, bh);
2988
2989        if (buffer_meta(bh))
2990                rw |= REQ_META;
2991        if (buffer_prio(bh))
2992                rw |= REQ_PRIO;
2993
2994        bio_get(bio);
2995        submit_bio(rw, bio);
2996
2997        if (bio_flagged(bio, BIO_EOPNOTSUPP))
2998                ret = -EOPNOTSUPP;
2999
3000        bio_put(bio);
3001        return ret;
3002}
3003EXPORT_SYMBOL_GPL(_submit_bh);
3004
3005int submit_bh(int rw, struct buffer_head *bh)
3006{
3007        return _submit_bh(rw, bh, 0);
3008}
3009EXPORT_SYMBOL(submit_bh);
3010
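/*
 * Editorial sketch, not part of the kernel source: a typical caller of
 * submit_bh().  The invariants asserted by the BUG_ON()s in _submit_bh()
 * must already hold - the buffer is locked, mapped and has b_end_io set -
 * and the extra reference taken here is dropped by end_buffer_read_sync().
 * The function name and arguments are hypothetical.
 *
 *	static int example_read_block(struct block_device *bdev,
 *				      sector_t block, unsigned size)
 *	{
 *		struct buffer_head *bh = __getblk(bdev, block, size);
 *		int err = 0;
 *
 *		if (!bh)
 *			return -ENOMEM;
 *		lock_buffer(bh);
 *		if (!buffer_uptodate(bh)) {
 *			get_bh(bh);
 *			bh->b_end_io = end_buffer_read_sync;
 *			submit_bh(READ, bh);
 *			wait_on_buffer(bh);
 *			if (!buffer_uptodate(bh))
 *				err = -EIO;
 *		} else {
 *			unlock_buffer(bh);
 *		}
 *		brelse(bh);
 *		return err;
 *	}
 */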
3011/**
3012 * ll_rw_block: low-level access to block devices (DEPRECATED)
3013 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
3014 * @nr: number of &struct buffer_heads in the array
3015 * @bhs: array of pointers to &struct buffer_head
3016 *
3017 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3018 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3019 * %READA option is described in the documentation for generic_make_request()
3020 * which ll_rw_block() calls.
3021 *
3022 * This function drops any buffer that it cannot get a lock on (with the
3023 * BH_Lock state bit), any buffer that appears to be clean when doing a write
3024 * request, and any buffer that appears to be up-to-date when doing a read
3025 * request.  Further it marks as clean buffers that are processed for
3026 * writing (the buffer cache won't assume that they are actually clean
3027 * until the buffer gets unlocked).
3028 *
3029 * ll_rw_block sets b_end_io to a simple completion handler that marks
3030 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3031 * any waiters.
3032 *
3033 * All of the buffers must be for the same device, and each buffer's size
3034 * must be a multiple of the current approved size for the device.
3035 */
3036void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3037{
3038        int i;
3039
3040        for (i = 0; i < nr; i++) {
3041                struct buffer_head *bh = bhs[i];
3042
3043                if (!trylock_buffer(bh))
3044                        continue;
3045                if (rw == WRITE) {
3046                        if (test_clear_buffer_dirty(bh)) {
3047                                bh->b_end_io = end_buffer_write_sync;
3048                                get_bh(bh);
3049                                submit_bh(WRITE, bh);
3050                                continue;
3051                        }
3052                } else {
3053                        if (!buffer_uptodate(bh)) {
3054                                bh->b_end_io = end_buffer_read_sync;
3055                                get_bh(bh);
3056                                submit_bh(rw, bh);
3057                                continue;
3058                        }
3059                }
3060                unlock_buffer(bh);
3061        }
3062}
3063EXPORT_SYMBOL(ll_rw_block);
3064
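/*
 * Editorial sketch, not part of the kernel source: the usual pattern of
 * starting reads on a batch of buffers with ll_rw_block() and then only
 * waiting for the ones the caller actually needs.  ll_rw_block() skips
 * buffers it cannot lock or that are already up to date, so the caller
 * must still check buffer_uptodate() after waiting.  bdev, block and
 * blocksize are hypothetical, and __getblk() error handling is elided.
 *
 *	struct buffer_head *bhs[2];
 *	int err = 0;
 *
 *	bhs[0] = __getblk(bdev, block, blocksize);
 *	bhs[1] = __getblk(bdev, block + 1, blocksize);
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		err = -EIO;
 *	brelse(bhs[0]);
 *	brelse(bhs[1]);
 */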
3065void write_dirty_buffer(struct buffer_head *bh, int rw)
3066{
3067        lock_buffer(bh);
3068        if (!test_clear_buffer_dirty(bh)) {
3069                unlock_buffer(bh);
3070                return;
3071        }
3072        bh->b_end_io = end_buffer_write_sync;
3073        get_bh(bh);
3074        submit_bh(rw, bh);
3075}
3076EXPORT_SYMBOL(write_dirty_buffer);
3077
3078/*
3079 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3080 * and then start new I/O and then wait upon it.  The caller must have a ref on
3081 * the buffer_head.
3082 */
3083int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3084{
3085        int ret = 0;
3086
3087        WARN_ON(atomic_read(&bh->b_count) < 1);
3088        lock_buffer(bh);
3089        if (test_clear_buffer_dirty(bh)) {
3090                get_bh(bh);
3091                bh->b_end_io = end_buffer_write_sync;
3092                ret = submit_bh(rw, bh);
3093                wait_on_buffer(bh);
3094                if (!ret && !buffer_uptodate(bh))
3095                        ret = -EIO;
3096        } else {
3097                unlock_buffer(bh);
3098        }
3099        return ret;
3100}
3101EXPORT_SYMBOL(__sync_dirty_buffer);
3102
3103int sync_dirty_buffer(struct buffer_head *bh)
3104{
3105        return __sync_dirty_buffer(bh, WRITE_SYNC);
3106}
3107EXPORT_SYMBOL(sync_dirty_buffer);
3108
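/*
 * Editorial sketch, not part of the kernel source: how a filesystem might
 * modify a metadata block and push it to disk through sync_dirty_buffer().
 * sb and block are hypothetical; error handling is abbreviated.
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	int err;
 *
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	writes with WRITE_SYNC and waits
 *	brelse(bh);
 *	return err;
 */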
3109/*
3110 * try_to_free_buffers() checks if all the buffers on this particular page
3111 * are unused, and releases them if so.
3112 *
3113 * Exclusion against try_to_free_buffers may be obtained by either
3114 * locking the page or by holding its mapping's private_lock.
3115 *
3116 * If the page is dirty but all the buffers are clean then we need to
3117 * be sure to mark the page clean as well.  This is because the page
3118 * may be against a block device, and a later reattachment of buffers
3119 * to a dirty page will set *all* buffers dirty.  Which would corrupt
3120 * filesystem data on the same device.
3121 *
3122 * The same applies to regular filesystem pages: if all the buffers are
3123 * clean then we set the page clean and proceed.  To do that, we require
3124 * total exclusion from __set_page_dirty_buffers().  That is obtained with
3125 * private_lock.
3126 *
3127 * try_to_free_buffers() is non-blocking.
3128 */
3129static inline int buffer_busy(struct buffer_head *bh)
3130{
3131        return atomic_read(&bh->b_count) |
3132                (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3133}
3134
3135static int
3136drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3137{
3138        struct buffer_head *head = page_buffers(page);
3139        struct buffer_head *bh;
3140
3141        bh = head;
3142        do {
3143                if (buffer_write_io_error(bh) && page->mapping)
3144                        set_bit(AS_EIO, &page->mapping->flags);
3145                if (buffer_busy(bh))
3146                        goto failed;
3147                bh = bh->b_this_page;
3148        } while (bh != head);
3149
3150        do {
3151                struct buffer_head *next = bh->b_this_page;
3152
3153                if (bh->b_assoc_map)
3154                        __remove_assoc_queue(bh);
3155                bh = next;
3156        } while (bh != head);
3157        *buffers_to_free = head;
3158        __clear_page_buffers(page);
3159        return 1;
3160failed:
3161        return 0;
3162}
3163
3164int try_to_free_buffers(struct page *page)
3165{
3166        struct address_space * const mapping = page->mapping;
3167        struct buffer_head *buffers_to_free = NULL;
3168        int ret = 0;
3169
3170        BUG_ON(!PageLocked(page));
3171        if (PageWriteback(page))
3172                return 0;
3173
3174        if (mapping == NULL) {          /* can this still happen? */
3175                ret = drop_buffers(page, &buffers_to_free);
3176                goto out;
3177        }
3178
3179        spin_lock(&mapping->private_lock);
3180        ret = drop_buffers(page, &buffers_to_free);
3181
3182        /*
3183         * If the filesystem writes its buffers by hand (eg ext3)
3184         * then we can have clean buffers against a dirty page.  We
3185         * clean the page here; otherwise the VM will never notice
3186         * that the filesystem did any IO at all.
3187         *
3188         * Also, during truncate, discard_buffer will have marked all
3189         * the page's buffers clean.  We discover that here and clean
3190         * the page also.
3191         *
3192         * private_lock must be held over this entire operation in order
3193         * to synchronise against __set_page_dirty_buffers and prevent the
3194         * dirty bit from being lost.
3195         */
3196        if (ret)
3197                cancel_dirty_page(page, PAGE_CACHE_SIZE);
3198        spin_unlock(&mapping->private_lock);
3199out:
3200        if (buffers_to_free) {
3201                struct buffer_head *bh = buffers_to_free;
3202
3203                do {
3204                        struct buffer_head *next = bh->b_this_page;
3205                        free_buffer_head(bh);
3206                        bh = next;
3207                } while (bh != buffers_to_free);
3208        }
3209        return ret;
3210}
3211EXPORT_SYMBOL(try_to_free_buffers);
3212
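/*
 * Editorial sketch, not part of the kernel source: try_to_free_buffers()
 * is normally reached through a filesystem's ->releasepage() method (or
 * directly from try_to_release_page() when no method is provided).  A
 * minimal, hypothetical implementation just forwards to it; the VM calls
 * it with the page locked, as the BUG_ON() above requires.
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */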
3213/*
3214 * There are no bdflush tunables left.  But distributions are
3215 * still running obsolete flush daemons, so we terminate them here.
3216 *
3217 * Use of bdflush() is deprecated and will be removed in a future kernel.
3218 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3219 */
3220SYSCALL_DEFINE2(bdflush, int, func, long, data)
3221{
3222        static int msg_count;
3223
3224        if (!capable(CAP_SYS_ADMIN))
3225                return -EPERM;
3226
3227        if (msg_count < 5) {
3228                msg_count++;
3229                printk(KERN_INFO
3230                        "warning: process `%s' used the obsolete bdflush"
3231                        " system call\n", current->comm);
3232                printk(KERN_INFO "Fix your initscripts?\n");
3233        }
3234
3235        if (func == 1)
3236                do_exit(0);
3237        return 0;
3238}
3239
3240/*
3241 * Buffer-head allocation
3242 */
3243static struct kmem_cache *bh_cachep __read_mostly;
3244
3245/*
3246 * Once the number of bh's in the machine exceeds this level, we start
3247 * stripping them in writeback.
3248 */
3249static unsigned long max_buffer_heads;
3250
3251int buffer_heads_over_limit;
3252
3253struct bh_accounting {
3254        int nr;                 /* Number of live bh's */
3255        int ratelimit;          /* Limit cacheline bouncing */
3256};
3257
3258static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3259
3260static void recalc_bh_state(void)
3261{
3262        int i;
3263        int tot = 0;
3264
3265        if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3266                return;
3267        __this_cpu_write(bh_accounting.ratelimit, 0);
3268        for_each_online_cpu(i)
3269                tot += per_cpu(bh_accounting, i).nr;
3270        buffer_heads_over_limit = (tot > max_buffer_heads);
3271}
3272
3273struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3274{
3275        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3276        if (ret) {
3277                INIT_LIST_HEAD(&ret->b_assoc_buffers);
3278                preempt_disable();
3279                __this_cpu_inc(bh_accounting.nr);
3280                recalc_bh_state();
3281                preempt_enable();
3282        }
3283        return ret;
3284}
3285EXPORT_SYMBOL(alloc_buffer_head);
3286
3287void free_buffer_head(struct buffer_head *bh)
3288{
3289        BUG_ON(!list_empty(&bh->b_assoc_buffers));
3290        kmem_cache_free(bh_cachep, bh);
3291        preempt_disable();
3292        __this_cpu_dec(bh_accounting.nr);
3293        recalc_bh_state();
3294        preempt_enable();
3295}
3296EXPORT_SYMBOL(free_buffer_head);
3297
3298static void buffer_exit_cpu(int cpu)
3299{
3300        int i;
3301        struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3302
3303        for (i = 0; i < BH_LRU_SIZE; i++) {
3304                brelse(b->bhs[i]);
3305                b->bhs[i] = NULL;
3306        }
3307        this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3308        per_cpu(bh_accounting, cpu).nr = 0;
3309}
3310
3311static int buffer_cpu_notify(struct notifier_block *self,
3312                              unsigned long action, void *hcpu)
3313{
3314        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3315                buffer_exit_cpu((unsigned long)hcpu);
3316        return NOTIFY_OK;
3317}
3318
3319/**
3320 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3321 * @bh: struct buffer_head
3322 *
3323 * Returns true if the buffer is up-to-date; otherwise returns false
3324 * with the buffer locked.
3325 */
3326int bh_uptodate_or_lock(struct buffer_head *bh)
3327{
3328        if (!buffer_uptodate(bh)) {
3329                lock_buffer(bh);
3330                if (!buffer_uptodate(bh))
3331                        return 0;
3332                unlock_buffer(bh);
3333        }
3334        return 1;
3335}
3336EXPORT_SYMBOL(bh_uptodate_or_lock);
3337
3338/**
3339 * bh_submit_read - Submit a locked buffer for reading
3340 * @bh: struct buffer_head
3341 *
3342 * Returns zero on success and -EIO on error.
3343 */
3344int bh_submit_read(struct buffer_head *bh)
3345{
3346        BUG_ON(!buffer_locked(bh));
3347
3348        if (buffer_uptodate(bh)) {
3349                unlock_buffer(bh);
3350                return 0;
3351        }
3352
3353        get_bh(bh);
3354        bh->b_end_io = end_buffer_read_sync;
3355        submit_bh(READ, bh);
3356        wait_on_buffer(bh);
3357        if (buffer_uptodate(bh))
3358                return 0;
3359        return -EIO;
3360}
3361EXPORT_SYMBOL(bh_submit_read);
3362
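/*
 * Editorial sketch, not part of the kernel source: the two helpers above
 * compose naturally - test (and lock if needed) first, then submit the
 * read only when the buffer was not already up to date.  The wrapper name
 * is hypothetical.
 *
 *	static int example_read_if_needed(struct buffer_head *bh)
 *	{
 *		if (bh_uptodate_or_lock(bh))
 *			return 0;
 *		return bh_submit_read(bh);
 *	}
 */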
3363void __init buffer_init(void)
3364{
3365        unsigned long nrpages;
3366
3367        bh_cachep = kmem_cache_create("buffer_head",
3368                        sizeof(struct buffer_head), 0,
3369                                (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3370                                SLAB_MEM_SPREAD),
3371                                NULL);
3372
3373        /*
3374         * Limit the bh occupancy to 10% of ZONE_NORMAL
3375         */
3376        nrpages = (nr_free_buffer_pages() * 10) / 100;
3377        max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3378        hotcpu_notifier(buffer_cpu_notify, 0);
3379}
3380
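/*
 * Editorial sketch, not part of the kernel source: the limit computed
 * above, worked through for hypothetical numbers.  With 1,000,000 free
 * buffer pages, 4096-byte pages and a buffer_head of roughly 104 bytes
 * (the exact size is architecture- and config-dependent):
 *
 *	nrpages          = 1,000,000 * 10 / 100   = 100,000
 *	max_buffer_heads = 100,000 * (4096 / 104) = 3,900,000
 *
 * so buffer_heads_over_limit is set once about 3.9 million buffer heads
 * are live, and buffers start being stripped during writeback.
 */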