/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

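/*
 * Set up the slab caches for the ext4_io_page and ext4_io_end
 * structures; called once at module initialization.
 */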
int __init ext4_init_pageio(void)
{
        io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
        if (io_page_cachep == NULL)
                return -ENOMEM;
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL) {
                kmem_cache_destroy(io_page_cachep);
                return -ENOMEM;
        }
        return 0;
}

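/*
 * Tear down the slab caches created by ext4_init_pageio().
 */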
void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_page_cachep);
}

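/*
 * Wait until all pending io_end structures for @inode have been freed,
 * i.e. until the inode's i_ioend_count drops to zero.
 */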
void ext4_ioend_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

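/*
 * Drop a reference to an ext4_io_page; the final put ends writeback on
 * the underlying page and releases it.
 */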
static void put_io_page(struct ext4_io_page *io_page)
{
        if (atomic_dec_and_test(&io_page->p_count)) {
                end_page_writeback(io_page->p_page);
                put_page(io_page->p_page);
                kmem_cache_free(io_page_cachep, io_page);
        }
}

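/*
 * Free an io_end, dropping its page references and waking up any
 * waiters once the inode's io_end count hits zero.  Freeing an io_end
 * that still needs unwritten extent conversion is a bug.
 */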
void ext4_free_io_end(ext4_io_end_t *io)
{
        int i;

        BUG_ON(!io);
        BUG_ON(!list_empty(&io->list));
        BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);

        if (io->page)
                put_page(io->page);
        for (i = 0; i < io->num_io_pages; i++)
                put_io_page(io->pages[i]);
        io->num_io_pages = 0;
        if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io->inode));
        kmem_cache_free(io_end_cachep, io);
}

/* Check a range of space and convert unwritten extents to written extents. */
static int ext4_end_io(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        int ret = 0;

        ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        ret = ext4_convert_unwritten_extents(inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss!  "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
        if (io->iocb)
                aio_complete(io->iocb, io->result, 0);

        if (io->flag & EXT4_IO_END_DIRECT)
                inode_dio_done(inode);
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
                wake_up_all(ext4_ioend_wq(inode));
        return ret;
}

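/*
 * Debugging helper: dump the inode's completed_io list.  This compiles
 * to a no-op unless EXT4FS_DEBUG is defined.
 */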
static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
                ext4_debug("inode %lu completed_io list is empty\n",
                           inode->i_ino);
                return;
        }

        ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
        list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                            io, inode->i_ino, io0, io1);
        }
#endif
}

/* Add the io_end to the per-inode completed end_io list. */
void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
        unsigned long flags;

        BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        if (list_empty(&ei->i_completed_io_list)) {
                io_end->flag |= EXT4_IO_END_QUEUED;
                queue_work(wq, &io_end->work);
        }
        list_add_tail(&io_end->list, &ei->i_completed_io_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

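/*
 * Splice everything off the inode's completed_io list and convert the
 * unwritten extents covered by each io_end.  @work_io is non-NULL when
 * we are running from the workqueue worker; in that case its QUEUED
 * flag is cleared here and it is freed once converted.  Returns the
 * first conversion error encountered, if any.
 */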
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      ext4_io_end_t *work_io)
{
        ext4_io_end_t *io;
        struct list_head unwritten, complete, to_free;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        INIT_LIST_HEAD(&complete);
        INIT_LIST_HEAD(&to_free);

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode);
        list_replace_init(&ei->i_completed_io_list, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io->list);

                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;

                list_add_tail(&io->list, &complete);
        }
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        while (!list_empty(&complete)) {
                io = list_entry(complete.next, ext4_io_end_t, list);
                io->flag &= ~EXT4_IO_END_UNWRITTEN;
                /* The end_io context cannot be destroyed now because it is
                 * still in use by the queued worker; the worker thread will
                 * destroy it later. */
                if (io->flag & EXT4_IO_END_QUEUED)
                        list_del_init(&io->list);
                else
                        list_move(&io->list, &to_free);
        }
        /* If we are called from the worker context, it is time to clear the
         * queued flag, and to destroy its end_io if it has already been
         * converted. */
        if (work_io) {
                work_io->flag &= ~EXT4_IO_END_QUEUED;
                if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
                        list_add_tail(&work_io->list, &to_free);
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&to_free)) {
                io = list_entry(to_free.next, ext4_io_end_t, list);
                list_del_init(&io->list);
                ext4_free_io_end(io);
        }
        return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
        ext4_do_flush_completed_IO(io->inode, io);
}

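/*
 * Flush all pending unwritten extent conversions for @inode and wait
 * for any conversions still in flight to complete.
 */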
int ext4_flush_unwritten_io(struct inode *inode)
{
        int ret;

        WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
                     !(inode->i_state & I_FREEING));
        ret = ext4_do_flush_completed_IO(inode, NULL);
        ext4_unwritten_wait(inode);
        return ret;
}

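/*
 * Allocate and initialize a zeroed io_end for @inode, taking a
 * reference on the inode's io_end count.  Returns NULL if the
 * allocation fails.
 */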
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);

        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_WORK(&io->work, ext4_end_io_work);
                INIT_LIST_HEAD(&io->list);
        }
        return io;
}

/*
 * Print a buffer I/O error message compatible with fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace a la Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

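/*
 * Completion callback for writeback bios: on error, mark the affected
 * pages and buffers, then drop the io_page references.  The io_end is
 * freed immediately unless unwritten extents still need conversion, in
 * which case it is queued on the inode's completed_io list.
 */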
static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        struct inode *inode;
        int i;
        sector_t bi_sector = bio->bi_sector;

        BUG_ON(!io_end);
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;
        bio_put(bio);

        for (i = 0; i < io_end->num_io_pages; i++) {
                struct page *page = io_end->pages[i]->p_page;
                struct buffer_head *bh, *head;
                loff_t offset;
                loff_t io_end_offset;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        head = page_buffers(page);
                        BUG_ON(!head);

                        io_end_offset = io_end->offset + io_end->size;

                        offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
                        bh = head;
                        do {
                                if ((offset >= io_end->offset) &&
                                    (offset+bh->b_size <= io_end_offset))
                                        buffer_io_error(bh);

                                offset += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);
                }

                put_io_page(io_end->pages[i]);
        }
        io_end->num_io_pages = 0;
        inode = io_end->inode;

        if (error) {
                io_end->flag |= EXT4_IO_END_ERROR;
                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }

        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
                return;
        }

        ext4_add_complete_io(io_end);
}

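/*
 * Submit the bio accumulated in @io (if any) and reset the submission
 * state so that the next buffer starts a fresh bio.
 */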
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
        io->io_op = 0;
        io->io_end = NULL;
}

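/*
 * Start a new bio for @bh: allocate an io_end and a bio, point the bio
 * at the buffer's starting sector, and record the write operation
 * (WRITE_SYNC for data-integrity writeback) in @io.
 */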
static int io_submit_init(struct ext4_io_submit *io,
                          struct inode *inode,
                          struct writeback_control *wbc,
                          struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        io_end = ext4_init_io_end(inode, GFP_NOFS);
        if (!io_end)
                return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;

        io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

        io->io_bio = bio;
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
}

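/*
 * Add @bh to the bio being built up in @io.  The current bio is
 * submitted and a new one started whenever the buffer is discontiguous
 * with the previous block, the bio is full, or the io_end's page array
 * would overflow.  Unmapped and delayed buffers are skipped.  Returns
 * -ENOMEM if a new io_end cannot be allocated.
 */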
static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct ext4_io_page *io_page,
                            struct inode *inode,
                            struct writeback_control *wbc,
                            struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        int ret;

        if (buffer_new(bh)) {
                clear_buffer_new(bh);
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        }

        if (!buffer_mapped(bh) || buffer_delay(bh)) {
                if (!buffer_mapped(bh))
                        clear_buffer_dirty(bh);
                if (io->io_bio)
                        ext4_io_submit(io);
                return 0;
        }

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init(io, inode, wbc, bh);
                if (ret)
                        return ret;
        }
        io_end = io->io_end;
        if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
            (io_end->pages[io_end->num_io_pages-1] != io_page))
                goto submit_and_retry;
        if (buffer_uninit(bh))
                ext4_set_io_unwritten_flag(inode, io_end);
        io->io_end->size += bh->b_size;
        io->io_next_block++;
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        if ((io_end->num_io_pages == 0) ||
            (io_end->pages[io_end->num_io_pages-1] != io_page)) {
                io_end->pages[io_end->num_io_pages++] = io_page;
                atomic_inc(&io_page->p_count);
        }
        return 0;
}

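/*
 * Write out the first @len bytes of @page: set up an io_page tracking
 * structure, mark the page as under writeback, zero any buffers beyond
 * @len (the page may straddle i_size), and pass each remaining buffer
 * to io_submit_add_bh().  The page is unlocked before returning.
 */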
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {

                block_end = block_start + blocksize;
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size.  It must be zeroed out on
                         * each and every writepage invocation because it may
                         * be mmapped.  "A file is mapped in multiples of the
                         * page size.  For a file that is not a multiple of
                         * the page size, the remaining memory is zeroed when
                         * mapped, and writes to that region are not written
                         * out to the file."
                         */
                        zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        /*
         * If the page was truncated before we could do the writeback,
         * or we had a memory allocation error while trying to write
         * the first buffer head, we won't have submitted any pages for
         * I/O.  In that case we need to make sure we've cleared the
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
        put_io_page(io_page);
        return ret;
}