/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Decrement reference counter for data buffer. If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
        if (buffer_freed(bh)) {
                WARN_ON_ONCE(buffer_dirty(bh));
                clear_buffer_freed(bh);
                clear_buffer_mapped(bh);
                clear_buffer_new(bh);
                clear_buffer_req(bh);
                bh->b_bdev = NULL;
                release_buffer_page(bh);
        } else
                put_bh(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
        if (!jbd_trylock_bh_state(bh)) {
                spin_unlock(&journal->j_list_lock);
                schedule();
                return 0;
        }
        return 1;
}
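
/*
 * Typical calling pattern (as used below): on failure the caller has lost
 * j_list_lock, so it must either take the bh_state lock blocking or retry,
 * then re-take j_list_lock and revalidate the buffer's state before
 * touching it again.
 */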

/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction)
{
        struct journal_head *descriptor;
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        header = (journal_header_t *)(bh->b_data);
        header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
        header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);

        if (journal->j_flags & JFS_BARRIER)
                ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
        else
                ret = sync_dirty_buffer(bh);

        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);

        return (ret == -EIO);
}
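
/*
 * On disk, the commit record written above is a single journal block whose
 * first bytes are a journal_header_t (all fields big-endian):
 *
 *      h_magic     = JFS_MAGIC_NUMBER
 *      h_blocktype = JFS_COMMIT_BLOCK
 *      h_sequence  = tid of the committing transaction
 *
 * The rest of the block is left zeroed by journal_get_descriptor_buffer().
 * Recovery looks for this header to decide whether a transaction in the
 * log is complete.
 */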

static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
                                   int write_op)
{
        int i;

        for (i = 0; i < bufs; i++) {
                wbuf[i]->b_end_io = end_buffer_write_sync;
                /* We use up our safety reference in submit_bh() */
                submit_bh(write_op, wbuf[i]);
        }
}

/*
 *  Submit all the data buffers to disk
 */
static int journal_submit_data_buffers(journal_t *journal,
                                       transaction_t *commit_transaction,
                                       int write_op)
{
        struct journal_head *jh;
        struct buffer_head *bh;
        int locked;
        int bufs = 0;
        struct buffer_head **wbuf = journal->j_wbuf;
        int err = 0;

        /*
         * Whenever we unlock the journal and sleep, things can get added
         * onto ->t_sync_datalist, so we have to keep looping back to
         * write_out_data until we *know* that the list is empty.
         *
         * Cleanup any flushed data buffers from the data list.  Even in
         * abort mode, we want to flush this out as soon as possible.
         */
write_out_data:
        cond_resched();
        spin_lock(&journal->j_list_lock);

        while (commit_transaction->t_sync_datalist) {
                jh = commit_transaction->t_sync_datalist;
                bh = jh2bh(jh);
                locked = 0;

                /* Get a reference just to make sure the buffer does not
                 * disappear when we are forced to drop various locks */
                get_bh(bh);
                /* If the buffer is dirty, we need to submit IO and hence
                 * we need the buffer lock. We try to lock the buffer without
                 * blocking. If we fail, we need to drop j_list_lock and do
                 * blocking lock_buffer().
                 */
                if (buffer_dirty(bh)) {
                        if (!trylock_buffer(bh)) {
                                BUFFER_TRACE(bh, "needs blocking lock");
                                spin_unlock(&journal->j_list_lock);
                                trace_jbd_do_submit_data(journal,
                                                     commit_transaction);
                                /* Write out all data to prevent deadlocks */
                                journal_do_submit_data(wbuf, bufs, write_op);
                                bufs = 0;
                                lock_buffer(bh);
                                spin_lock(&journal->j_list_lock);
                        }
                        locked = 1;
                }
                /* We have to get the bh_state lock. Again out of order, sigh. */
                if (!inverted_lock(journal, bh)) {
                        jbd_lock_bh_state(bh);
                        spin_lock(&journal->j_list_lock);
                }
                /* Someone already cleaned up the buffer? */
                if (!buffer_jbd(bh) || bh2jh(bh) != jh
                        || jh->b_transaction != commit_transaction
                        || jh->b_jlist != BJ_SyncData) {
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        BUFFER_TRACE(bh, "already cleaned up");
                        release_data_buffer(bh);
                        continue;
                }
                if (locked && test_clear_buffer_dirty(bh)) {
                        BUFFER_TRACE(bh, "needs writeout, adding to array");
                        wbuf[bufs++] = bh;
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (bufs == journal->j_wbufsize) {
                                spin_unlock(&journal->j_list_lock);
                                trace_jbd_do_submit_data(journal,
                                                     commit_transaction);
                                journal_do_submit_data(wbuf, bufs, write_op);
                                bufs = 0;
                                goto write_out_data;
                        }
                } else if (!locked && buffer_locked(bh)) {
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        put_bh(bh);
                } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        if (unlikely(!buffer_uptodate(bh)))
                                err = -EIO;
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        release_data_buffer(bh);
                }

                if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
        }
        spin_unlock(&journal->j_list_lock);
        trace_jbd_do_submit_data(journal, commit_transaction);
        journal_do_submit_data(wbuf, bufs, write_op);

        return err;
}
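
/*
 * In the loop above, each buffer on t_sync_datalist lands in one of three
 * cases:
 *
 *   dirty (we hold the buffer lock)  -> queue it in wbuf[] for submission
 *                                       and file it on BJ_Locked;
 *   clean but still under write-back -> just move it to BJ_Locked so the
 *                                       wait loop in the commit path picks
 *                                       it up;
 *   clean and unlocked               -> write-out already finished, so
 *                                       unfile and release it immediately.
 *
 * Buffers filed on BJ_Locked are waited on later, in
 * journal_commit_transaction().
 */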

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
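/*
 * Roughly, as marked by the jbd_debug() calls below:
 *
 *   phase 1: lock down the running transaction, wait for all outstanding
 *            handles to complete, discard unused reserved buffers and
 *            clean the checkpoint lists;
 *   phase 2: submit and wait for the ordered-mode data buffers, then
 *            write the revoke records;
 *   phase 3: write the metadata buffers to the log, filling descriptor
 *            blocks with tags as we go;
 *   phase 4: wait for the metadata writes (t_iobuf_list) to complete;
 *   phase 5: wait for the revoke/descriptor control buffers (t_log_list);
 *   phase 6: write the commit record;
 *   phase 7: move committed buffers onto the checkpoint lists;
 *   phase 8: mark the transaction T_FINISHED and wake up waiters.
 */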
void journal_commit_transaction(journal_t *journal)
{
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned int blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;
        struct blk_plug plug;
        int write_op = WRITE;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                mutex_lock(&journal->j_checkpoint_mutex);
                /*
                 * We hold j_checkpoint_mutex so the tail cannot change under
                 * us. We don't need any special data guarantees for writing
                 * the sb since the journal is empty and it is OK for the
                 * write to be flushed only with the transaction commit.
                 */
                journal_update_sb_log_tail(journal, journal->j_tail_sequence,
                                           journal->j_tail, WRITE_SYNC);
                mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        trace_jbd_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        trace_jbd_commit_locking(journal, commit_transaction);
        spin_lock(&commit_transaction->t_handle_lock);
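        /*
         * Drain all outstanding handles: the standard prepare_to_wait()
         * pattern.  We must re-check t_updates after queueing ourselves on
         * j_wait_updates, and we drop both locks before scheduling so that
         * journal_stop() can decrement t_updates and wake us.
         */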
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT (commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A journal_get_undo_access()+journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it
         * potentially frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug (3, "JBD: commit phase 1\n");

        /*
         * Clear the revoked flag to reflect that there are no revoked
         * buffers in the next transaction which is going to be started.
         */
        journal_clear_buffer_revoked_flags(journal);

        /*
         * Switch to a new revoke table.
         */
        journal_switch_revoke_table(journal);

        trace_jbd_commit_flushing(journal, commit_transaction);
        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug (3, "JBD: commit phase 2\n");

        if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
                write_op = WRITE_SYNC;

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        blk_start_plug(&plug);
        err = journal_submit_data_buffers(journal, commit_transaction,
                                          write_op);
        blk_finish_plug(&plug);

        /*
         * Wait for all previously submitted IO to complete.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_locked_list) {
                struct buffer_head *bh;

                jh = commit_transaction->t_locked_list->b_tprev;
                bh = jh2bh(jh);
                get_bh(bh);
                if (buffer_locked(bh)) {
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        spin_lock(&journal->j_list_lock);
                }
                if (unlikely(!buffer_uptodate(bh))) {
                        if (!trylock_page(bh->b_page)) {
                                spin_unlock(&journal->j_list_lock);
                                lock_page(bh->b_page);
                                spin_lock(&journal->j_list_lock);
                        }
                        if (bh->b_page->mapping)
                                set_bit(AS_EIO, &bh->b_page->mapping->flags);

                        unlock_page(bh->b_page);
                        SetPageError(bh->b_page);
                        err = -EIO;
                }
                if (!inverted_lock(journal, bh)) {
                        put_bh(bh);
                        spin_lock(&journal->j_list_lock);
                        continue;
                }
                if (buffer_jbd(bh) && bh2jh(bh) == jh &&
                    jh->b_transaction == commit_transaction &&
                    jh->b_jlist == BJ_Locked)
                        __journal_unfile_buffer(jh);
                jbd_unlock_bh_state(bh);
                release_data_buffer(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);

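        /*
         * Note that an IO error on file data is not fatal to the journal
         * here unless JFS_ABORT_ON_SYNCDATA_ERR is set: we warn, optionally
         * abort, and then clear err so that the commit itself proceeds.
         */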
        if (err) {
                char b[BDEVNAME_SIZE];

                printk(KERN_WARNING
                        "JBD: Detected IO errors while flushing file data "
                        "on %s\n", bdevname(journal->j_fs_dev, b));
                if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
                        journal_abort(journal, err);
                err = 0;
        }

        blk_start_plug(&plug);

        journal_write_revoke_records(journal, commit_transaction, write_op);

        /*
         * If we found any dirty or locked buffers, then we should have
         * looped back up to the write_out_data label.  If there weren't
         * any then journal_clean_data_list should have wiped the list
         * clean by now, so check that it is in fact empty.
         */
        J_ASSERT (commit_transaction->t_sync_datalist == NULL);

        jbd_debug (3, "JBD: commit phase 3\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        spin_unlock(&journal->j_state_lock);

        trace_jbd_commit_logging(journal, commit_transaction);
        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);

        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }
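
                /*
                 * Layout of the descriptor block being filled in here
                 * (all fields big-endian):
                 *
                 *      journal_header_t    { h_magic, h_blocktype =
                 *                            JFS_DESCRIPTOR_BLOCK, h_sequence }
                 *      journal_block_tag_t { t_blocknr, t_flags }   <- tagp
                 *      16 bytes of journal UUID (after the first tag only;
                 *                                later tags carry
                 *                                JFS_FLAG_SAME_UUID)
                 *      journal_block_tag_t ...
                 *
                 * The last tag written gets JFS_FLAG_LAST_TAG set before the
                 * block is submitted.
                 */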

                /* Where is the buffer to be written? */

                err = journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                get_bh(jh2bh(jh));

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_buffer_jwrite(jh2bh(jh));
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_buffer_jwrite(jh2bh(new_jh));
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JFS_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JFS_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += sizeof(journal_block_tag_t);
                space_left -= sizeof(journal_block_tag_t);

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < sizeof(journal_block_tag_t) + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(write_op, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        blk_finish_plug(&plug);

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 4\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
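        /*
         * Each temporary buffer_head created by journal_write_metadata_buffer()
         * sits on t_iobuf_list, paired with the original metadata buffer on
         * t_shadow_list.  As each write completes we free the temporary bh,
         * refile its shadow onto BJ_Forget for checkpointing, and wake anyone
         * waiting on BH_Unshadow.
         */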
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_buffer_jwrite(bh);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /*
                 * Wake up any transactions which were waiting for this
                 * IO to complete. The barrier must be here so that changes
                 * by journal_file_buffer() take effect before wake_up_bit()
                 * does the waitqueue check.
                 */
                smp_mb();
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 5\n");

        /* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                journal_unfile_buffer(journal, jh);
                journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 6\n");

        /* All metadata is written, now write commit record and do cleanup */
        spin_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
        commit_transaction->t_state = T_COMMIT_RECORD;
        spin_unlock(&journal->j_state_lock);

        if (journal_write_commit_record(journal, commit_transaction))
                err = -EIO;

        if (err)
                journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
                int try_to_free = 0;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                /*
                 * Get a reference so that bh cannot be freed before we are
                 * done with it.
                 */
                get_bh(bh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        __journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /*
                 * A buffer which has been freed while still being journaled by
                 * a previous transaction.
                 */
                if (buffer_freed(bh)) {
                        /*
                         * If the running transaction is the one containing
                         * "add to orphan" operation (b_next_transaction !=
                         * NULL), we have to wait for that transaction to
                         * commit before we can really get rid of the buffer.
                         * So just clear b_modified to not confuse transaction
                         * credit accounting and refile the buffer to
                         * BJ_Forget of the running transaction. If the just
                         * committed transaction contains "add to orphan"
                         * operation, we can completely invalidate the buffer
                         * now. We are rather thorough in that, since the
                         * buffer may still be accessible when blocksize <
                         * pagesize and it is attached to the last partial
                         * page.
                         */
                        jh->b_modified = 0;
                        if (!jh->b_next_transaction) {
                                clear_buffer_freed(bh);
                                clear_buffer_jbddirty(bh);
                                clear_buffer_mapped(bh);
                                clear_buffer_new(bh);
                                clear_buffer_req(bh);
                                bh->b_bdev = NULL;
                        }
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /*
                         * The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list.
                         */
                        if (!jh->b_next_transaction)
                                try_to_free = 1;
                }
                JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                __journal_refile_buffer(jh);
                jbd_unlock_bh_state(bh);
                if (try_to_free)
                        release_buffer_page(bh);
                else
                        __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 8\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * weight the commit time higher than the average time so we don't
         * react too strongly to vast changes in commit time
         */
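        /* i.e. avg_new = (3 * commit_time + avg_old) / 4 */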
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time*3 +
                                journal->j_average_commit_time) / 4;
        else
                journal->j_average_commit_time = commit_time;

        spin_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        trace_jbd_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        wake_up(&journal->j_wait_done_commit);
}