linux/fs/xfs/xfs_buf_item.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_mount.h"
  27#include "xfs_buf_item.h"
  28#include "xfs_trans_priv.h"
  29#include "xfs_error.h"
  30#include "xfs_trace.h"
  31
  32
  33kmem_zone_t     *xfs_buf_item_zone;
  34
  35static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
  36{
  37        return container_of(lip, struct xfs_buf_log_item, bli_item);
  38}
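/*
 * Illustrative pattern (not new code): every iop_* handler in this file
 * receives the embedded log item and converts it back to the containing
 * buf log item before touching any bli state, e.g.:
 *
 *	struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 *	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 */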
  39
  40
  41#ifdef XFS_TRANS_DEBUG
  42/*
  43 * This function uses an alternate strategy for tracking the bytes
  44 * that the user requests to be logged.  This can then be used
  45 * in conjunction with the bli_orig array in the buf log item to
  46 * catch bugs in our callers' code.
  47 *
  48 * We also double check the bits set in xfs_buf_item_log using a
  49 * simple algorithm to check that every byte is accounted for.
  50 */
  51STATIC void
  52xfs_buf_item_log_debug(
  53        xfs_buf_log_item_t      *bip,
  54        uint                    first,
  55        uint                    last)
  56{
  57        uint    x;
  58        uint    byte;
  59        uint    nbytes;
  60        uint    chunk_num;
  61        uint    word_num;
  62        uint    bit_num;
  63        uint    bit_set;
  64        uint    *wordp;
  65
  66        ASSERT(bip->bli_logged != NULL);
  67        byte = first;
  68        nbytes = last - first + 1;
  69        bfset(bip->bli_logged, first, nbytes);
  70        for (x = 0; x < nbytes; x++) {
  71                chunk_num = byte >> XFS_BLF_SHIFT;
  72                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
  73                bit_num = chunk_num & (NBWORD - 1);
  74                wordp = &(bip->__bli_format.blf_data_map[word_num]);
  75                bit_set = *wordp & (1 << bit_num);
  76                ASSERT(bit_set);
  77                byte++;
  78        }
  79}
  80
  81/*
  82 * This function is called when we flush something into a buffer without
  83 * logging it.  This happens for things like inodes which are logged
  84 * separately from the buffer.
  85 */
  86void
  87xfs_buf_item_flush_log_debug(
  88        xfs_buf_t       *bp,
  89        uint            first,
  90        uint            last)
  91{
  92        xfs_buf_log_item_t      *bip = bp->b_fspriv;
  93        uint                    nbytes;
  94
  95        if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
  96                return;
  97
  98        ASSERT(bip->bli_logged != NULL);
  99        nbytes = last - first + 1;
 100        bfset(bip->bli_logged, first, nbytes);
 101}
 102
 103/*
 104 * This function is called to verify that our callers have logged
 105 * all the bytes that they changed.
 106 *
 107 * It does this by comparing the original copy of the buffer stored in
 108 * the buf log item's bli_orig array to the current copy of the buffer
 109 * and ensuring that all bytes which mismatch are set in the bli_logged
 110 * array of the buf log item.
 111 */
 112STATIC void
 113xfs_buf_item_log_check(
 114        xfs_buf_log_item_t      *bip)
 115{
 116        char            *orig;
 117        char            *buffer;
 118        int             x;
 119        xfs_buf_t       *bp;
 120
 121        ASSERT(bip->bli_orig != NULL);
 122        ASSERT(bip->bli_logged != NULL);
 123
 124        bp = bip->bli_buf;
 125        ASSERT(bp->b_length > 0);
 126        ASSERT(bp->b_addr != NULL);
 127        orig = bip->bli_orig;
 128        buffer = bp->b_addr;
 129        for (x = 0; x < BBTOB(bp->b_length); x++) {
 130                if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
  131                        xfs_emerg(bp->b_target->bt_mount,
  132                                "%s: bip %p buffer %p orig %p index %d",
  133                                __func__, bip, bp, orig, x);
 134                        ASSERT(0);
 135                }
 136        }
 137}
 138#else
 139#define         xfs_buf_item_log_debug(x,y,z)
 140#define         xfs_buf_item_log_check(x)
 141#endif
 142
 143STATIC void     xfs_buf_do_callbacks(struct xfs_buf *bp);
 144
 145/*
  146 * This returns the number of log iovecs needed to log the
  147 * given segment of the buf log item.
 148 *
 149 * It calculates this as 1 iovec for the buf log format structure
 150 * and 1 for each stretch of non-contiguous chunks to be logged.
 151 * Contiguous chunks are logged in a single iovec.
 152 *
 153 * If the XFS_BLI_STALE flag has been set, then log nothing.
 154 */
 155STATIC uint
 156xfs_buf_item_size_segment(
 157        struct xfs_buf_log_item *bip,
 158        struct xfs_buf_log_format *blfp)
 159{
 160        struct xfs_buf          *bp = bip->bli_buf;
 161        uint                    nvecs;
 162        int                     next_bit;
 163        int                     last_bit;
 164
 165        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
 166        if (last_bit == -1)
 167                return 0;
 168
 169        /*
 170         * initial count for a dirty buffer is 2 vectors - the format structure
 171         * and the first dirty region.
 172         */
 173        nvecs = 2;
 174
 175        while (last_bit != -1) {
 176                /*
 177                 * This takes the bit number to start looking from and
 178                 * returns the next set bit from there.  It returns -1
 179                 * if there are no more bits set or the start bit is
 180                 * beyond the end of the bitmap.
 181                 */
 182                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
 183                                        last_bit + 1);
 184                /*
 185                 * If we run out of bits, leave the loop,
 186                 * else if we find a new set of bits bump the number of vecs,
 187                 * else keep scanning the current set of bits.
 188                 */
 189                if (next_bit == -1) {
 190                        break;
 191                } else if (next_bit != last_bit + 1) {
 192                        last_bit = next_bit;
 193                        nvecs++;
 194                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
 195                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
 196                            XFS_BLF_CHUNK)) {
 197                        last_bit = next_bit;
 198                        nvecs++;
 199                } else {
 200                        last_bit++;
 201                }
 202        }
 203
 204        return nvecs;
 205}
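/*
 * Worked example for the scan above: a blf_data_map of ...0111011 marks
 * chunks 0-1 and 3-5 dirty.  nvecs starts at 2 (format structure plus
 * the first dirty region); the gap at chunk 2 makes next_bit differ
 * from last_bit + 1, bumping nvecs to 3.  So a segment costs one iovec
 * for its header plus one per contiguous dirty region.
 */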
 206
 207/*
 208 * This returns the number of log iovecs needed to log the given buf log item.
 209 *
 210 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 211 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 212 * in a single iovec.
 213 *
  214 * Discontiguous buffers need a format structure per region that is being
 215 * logged. This makes the changes in the buffer appear to log recovery as though
 216 * they came from separate buffers, just like would occur if multiple buffers
 217 * were used instead of a single discontiguous buffer. This enables
 218 * discontiguous buffers to be in-memory constructs, completely transparent to
 219 * what ends up on disk.
 220 *
 221 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 222 * format structures.
 223 */
 224STATIC uint
 225xfs_buf_item_size(
 226        struct xfs_log_item     *lip)
 227{
 228        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 229        uint                    nvecs;
 230        int                     i;
 231
 232        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 233        if (bip->bli_flags & XFS_BLI_STALE) {
 234                /*
 235                 * The buffer is stale, so all we need to log
 236                 * is the buf log format structure with the
 237                 * cancel flag in it.
 238                 */
 239                trace_xfs_buf_item_size_stale(bip);
 240                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 241                return bip->bli_format_count;
 242        }
 243
 244        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
 245
 246        /*
 247         * the vector count is based on the number of buffer vectors we have
 248         * dirty bits in. This will only be greater than one when we have a
 249         * compound buffer with more than one segment dirty. Hence for compound
 250         * buffers we need to track which segment the dirty bits correspond to,
 251         * and when we move from one segment to the next increment the vector
 252         * count for the extra buf log format structure that will need to be
 253         * written.
 254         */
 255        nvecs = 0;
 256        for (i = 0; i < bip->bli_format_count; i++) {
 257                nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
 258        }
 259
 260        trace_xfs_buf_item_size(bip);
 261        return nvecs;
 262}
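/*
 * Example: a stale buffer contributes only its cancelled format
 * headers, so a single-map buffer returns 1 and a two-map compound
 * buffer returns 2.  A dirty (non-stale) buffer instead sums the
 * per-segment counts computed by xfs_buf_item_size_segment() above.
 */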
 263
 264static struct xfs_log_iovec *
 265xfs_buf_item_format_segment(
 266        struct xfs_buf_log_item *bip,
 267        struct xfs_log_iovec    *vecp,
 268        uint                    offset,
 269        struct xfs_buf_log_format *blfp)
 270{
 271        struct xfs_buf  *bp = bip->bli_buf;
 272        uint            base_size;
 273        uint            nvecs;
 274        int             first_bit;
 275        int             last_bit;
 276        int             next_bit;
 277        uint            nbits;
 278        uint            buffer_offset;
 279
 280        /* copy the flags across from the base format item */
 281        blfp->blf_flags = bip->__bli_format.blf_flags;
 282
 283        /*
 284         * Base size is the actual size of the ondisk structure - it reflects
 285         * the actual size of the dirty bitmap rather than the size of the in
 286         * memory structure.
 287         */
 288        base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
 289                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
 290
 291        nvecs = 0;
 292        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
 293        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
 294                /*
  295                 * If the map is not dirty in the transaction, mark
 296                 * the size as zero and do not advance the vector pointer.
 297                 */
 298                goto out;
 299        }
 300
 301        vecp->i_addr = blfp;
 302        vecp->i_len = base_size;
 303        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
 304        vecp++;
 305        nvecs = 1;
 306
 307        if (bip->bli_flags & XFS_BLI_STALE) {
 308                /*
 309                 * The buffer is stale, so all we need to log
 310                 * is the buf log format structure with the
 311                 * cancel flag in it.
 312                 */
 313                trace_xfs_buf_item_format_stale(bip);
 314                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
 315                goto out;
 316        }
 317
 318        /*
 319         * Fill in an iovec for each set of contiguous chunks.
 320         */
 321
 322        last_bit = first_bit;
 323        nbits = 1;
 324        for (;;) {
 325                /*
 326                 * This takes the bit number to start looking from and
 327                 * returns the next set bit from there.  It returns -1
 328                 * if there are no more bits set or the start bit is
 329                 * beyond the end of the bitmap.
 330                 */
 331                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
 332                                        (uint)last_bit + 1);
 333                /*
 334                 * If we run out of bits fill in the last iovec and get
 335                 * out of the loop.
 336                 * Else if we start a new set of bits then fill in the
 337                 * iovec for the series we were looking at and start
 338                 * counting the bits in the new one.
 339                 * Else we're still in the same set of bits so just
 340                 * keep counting and scanning.
 341                 */
 342                if (next_bit == -1) {
 343                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 344                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 345                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 346                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
 347                        nvecs++;
 348                        break;
 349                } else if (next_bit != last_bit + 1) {
 350                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 351                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 352                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 353                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
 354                        nvecs++;
 355                        vecp++;
 356                        first_bit = next_bit;
 357                        last_bit = next_bit;
 358                        nbits = 1;
 359                } else if (xfs_buf_offset(bp, offset +
 360                                              (next_bit << XFS_BLF_SHIFT)) !=
 361                           (xfs_buf_offset(bp, offset +
 362                                               (last_bit << XFS_BLF_SHIFT)) +
 363                            XFS_BLF_CHUNK)) {
 364                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 365                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 366                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 367                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
  368/*
  369 * You would think we need to bump the nvecs here too, but we do not:
  370 * this number is used by recovery, and it gets confused by the boundary
  371 * split here.
  372 *                      nvecs++;
  373 */
 374                        vecp++;
 375                        first_bit = next_bit;
 376                        last_bit = next_bit;
 377                        nbits = 1;
 378                } else {
 379                        last_bit++;
 380                        nbits++;
 381                }
 382        }
 383out:
 384        blfp->blf_size = nvecs;
 385        return vecp;
 386}
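/*
 * Continuing the ...0111011 example from xfs_buf_item_size_segment(),
 * the loop above would fill (ignoring page-boundary splits) roughly:
 *
 *	vecp[0]: the xfs_buf_log_format header, base_size bytes
 *	vecp[1]: chunks 0-1, i_len = 2 * XFS_BLF_CHUNK
 *	vecp[2]: chunks 3-5, i_len = 3 * XFS_BLF_CHUNK
 *
 * with each i_addr taken from xfs_buf_offset() at the chunk's byte
 * offset, and blf_size set to 3.
 */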
 387
 388/*
 389 * This is called to fill in the vector of log iovecs for the
 390 * given log buf item.  It fills the first entry with a buf log
 391 * format structure, and the rest point to contiguous chunks
 392 * within the buffer.
 393 */
 394STATIC void
 395xfs_buf_item_format(
 396        struct xfs_log_item     *lip,
 397        struct xfs_log_iovec    *vecp)
 398{
 399        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 400        struct xfs_buf          *bp = bip->bli_buf;
 401        uint                    offset = 0;
 402        int                     i;
 403
 404        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 405        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
 406               (bip->bli_flags & XFS_BLI_STALE));
 407
 408        /*
 409         * If it is an inode buffer, transfer the in-memory state to the
 410         * format flags and clear the in-memory state. We do not transfer
 411         * this state if the inode buffer allocation has not yet been committed
 412         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
 413         * correct replay of the inode allocation.
 414         */
 415        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
 416                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
 417                      xfs_log_item_in_current_chkpt(lip)))
 418                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
 419                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
 420        }
 421
 422        for (i = 0; i < bip->bli_format_count; i++) {
 423                vecp = xfs_buf_item_format_segment(bip, vecp, offset,
 424                                                &bip->bli_formats[i]);
 425                offset += bp->b_maps[i].bm_len;
 426        }
 427
 428        /*
 429         * Check to make sure everything is consistent.
 430         */
 431        trace_xfs_buf_item_format(bip);
 432        xfs_buf_item_log_check(bip);
 433}
 434
 435/*
 436 * This is called to pin the buffer associated with the buf log item in memory
 437 * so it cannot be written out.
 438 *
 439 * We also always take a reference to the buffer log item here so that the bli
 440 * is held while the item is pinned in memory. This means that we can
 441 * unconditionally drop the reference count a transaction holds when the
 442 * transaction is completed.
 443 */
 444STATIC void
 445xfs_buf_item_pin(
 446        struct xfs_log_item     *lip)
 447{
 448        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 449
 450        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 451        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
 452               (bip->bli_flags & XFS_BLI_STALE));
 453
 454        trace_xfs_buf_item_pin(bip);
 455
 456        atomic_inc(&bip->bli_refcount);
 457        atomic_inc(&bip->bli_buf->b_pin_count);
 458}
 459
 460/*
 461 * This is called to unpin the buffer associated with the buf log
 462 * item which was previously pinned with a call to xfs_buf_item_pin().
 463 *
 464 * Also drop the reference to the buf item for the current transaction.
 465 * If the XFS_BLI_STALE flag is set and we are the last reference,
 466 * then free up the buf log item and unlock the buffer.
 467 *
 468 * If the remove flag is set we are called from uncommit in the
 469 * forced-shutdown path.  If that is true and the reference count on
 470 * the log item is going to drop to zero we need to free the item's
 471 * descriptor in the transaction.
 472 */
 473STATIC void
 474xfs_buf_item_unpin(
 475        struct xfs_log_item     *lip,
 476        int                     remove)
 477{
 478        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 479        xfs_buf_t       *bp = bip->bli_buf;
 480        struct xfs_ail  *ailp = lip->li_ailp;
 481        int             stale = bip->bli_flags & XFS_BLI_STALE;
 482        int             freed;
 483
 484        ASSERT(bp->b_fspriv == bip);
 485        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 486
 487        trace_xfs_buf_item_unpin(bip);
 488
 489        freed = atomic_dec_and_test(&bip->bli_refcount);
 490
 491        if (atomic_dec_and_test(&bp->b_pin_count))
 492                wake_up_all(&bp->b_waiters);
 493
 494        if (freed && stale) {
 495                ASSERT(bip->bli_flags & XFS_BLI_STALE);
 496                ASSERT(xfs_buf_islocked(bp));
 497                ASSERT(XFS_BUF_ISSTALE(bp));
 498                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 499
 500                trace_xfs_buf_item_unpin_stale(bip);
 501
 502                if (remove) {
 503                        /*
 504                         * If we are in a transaction context, we have to
 505                         * remove the log item from the transaction as we are
 506                         * about to release our reference to the buffer.  If we
 507                         * don't, the unlock that occurs later in
 508                         * xfs_trans_uncommit() will try to reference the
 509                         * buffer which we no longer have a hold on.
 510                         */
 511                        if (lip->li_desc)
 512                                xfs_trans_del_item(lip);
 513
 514                        /*
 515                         * Since the transaction no longer refers to the buffer,
 516                         * the buffer should no longer refer to the transaction.
 517                         */
 518                        bp->b_transp = NULL;
 519                }
 520
 521                /*
 522                 * If we get called here because of an IO error, we may
 523                 * or may not have the item on the AIL. xfs_trans_ail_delete()
 524                 * will take care of that situation.
 525                 * xfs_trans_ail_delete() drops the AIL lock.
 526                 */
 527                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
 528                        xfs_buf_do_callbacks(bp);
 529                        bp->b_fspriv = NULL;
 530                        bp->b_iodone = NULL;
 531                } else {
 532                        spin_lock(&ailp->xa_lock);
 533                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
 534                        xfs_buf_item_relse(bp);
 535                        ASSERT(bp->b_fspriv == NULL);
 536                }
 537                xfs_buf_relse(bp);
 538        } else if (freed && remove) {
 539                /*
 540                 * There are currently two references to the buffer - the active
 541                 * LRU reference and the buf log item. What we are about to do
 542                 * here - simulate a failed IO completion - requires 3
 543                 * references.
 544                 *
 545                 * The LRU reference is removed by the xfs_buf_stale() call. The
 546                 * buf item reference is removed by the xfs_buf_iodone()
 547                 * callback that is run by xfs_buf_do_callbacks() during ioend
 548                 * processing (via the bp->b_iodone callback), and then finally
 549                 * the ioend processing will drop the IO reference if the buffer
 550                 * is marked XBF_ASYNC.
 551                 *
 552                 * Hence we need to take an additional reference here so that IO
 553                 * completion processing doesn't free the buffer prematurely.
 554                 */
 555                xfs_buf_lock(bp);
 556                xfs_buf_hold(bp);
 557                bp->b_flags |= XBF_ASYNC;
 558                xfs_buf_ioerror(bp, EIO);
 559                XFS_BUF_UNDONE(bp);
 560                xfs_buf_stale(bp);
 561                xfs_buf_ioend(bp, 0);
 562        }
 563}
 564
 565STATIC uint
 566xfs_buf_item_push(
 567        struct xfs_log_item     *lip,
 568        struct list_head        *buffer_list)
 569{
 570        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 571        struct xfs_buf          *bp = bip->bli_buf;
 572        uint                    rval = XFS_ITEM_SUCCESS;
 573
 574        if (xfs_buf_ispinned(bp))
 575                return XFS_ITEM_PINNED;
 576        if (!xfs_buf_trylock(bp))
 577                return XFS_ITEM_LOCKED;
 578
 579        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
 580
 581        trace_xfs_buf_item_push(bip);
 582
 583        if (!xfs_buf_delwri_queue(bp, buffer_list))
 584                rval = XFS_ITEM_FLUSHING;
 585        xfs_buf_unlock(bp);
 586        return rval;
 587}
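/*
 * The return value drives xfsaild: XFS_ITEM_PINNED asks for a log
 * flush before retrying, XFS_ITEM_LOCKED means another thread holds
 * the buffer, and XFS_ITEM_FLUSHING means the buffer was already
 * queued for delwri submission, so there is nothing further to do.
 */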
 588
 589/*
 590 * Release the buffer associated with the buf log item.  If there is no dirty
 591 * logged data associated with the buffer recorded in the buf log item, then
 592 * free the buf log item and remove the reference to it in the buffer.
 593 *
 594 * This call ignores the recursion count.  It is only called when the buffer
 595 * should REALLY be unlocked, regardless of the recursion count.
 596 *
 597 * We unconditionally drop the transaction's reference to the log item. If the
 598 * item was logged, then another reference was taken when it was pinned, so we
 599 * can safely drop the transaction reference now.  This also allows us to avoid
 600 * potential races with the unpin code freeing the bli by not referencing the
 601 * bli after we've dropped the reference count.
 602 *
 603 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 604 * if necessary but do not unlock the buffer.  This is for support of
 605 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 606 * free the item.
 607 */
 608STATIC void
 609xfs_buf_item_unlock(
 610        struct xfs_log_item     *lip)
 611{
 612        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 613        struct xfs_buf          *bp = bip->bli_buf;
 614        int                     aborted, clean, i;
 615        uint                    hold;
 616
 617        /* Clear the buffer's association with this transaction. */
 618        bp->b_transp = NULL;
 619
 620        /*
 621         * If this is a transaction abort, don't return early.  Instead, allow
 622         * the brelse to happen.  Normally it would be done for stale
 623         * (cancelled) buffers at unpin time, but we'll never go through the
 624         * pin/unpin cycle if we abort inside commit.
 625         */
 626        aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
 627
 628        /*
 629         * Before possibly freeing the buf item, determine if we should
 630         * release the buffer at the end of this routine.
 631         */
 632        hold = bip->bli_flags & XFS_BLI_HOLD;
 633
 634        /* Clear the per transaction state. */
 635        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);
 636
 637        /*
 638         * If the buf item is marked stale, then don't do anything.  We'll
 639         * unlock the buffer and free the buf item when the buffer is unpinned
 640         * for the last time.
 641         */
 642        if (bip->bli_flags & XFS_BLI_STALE) {
 643                trace_xfs_buf_item_unlock_stale(bip);
 644                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 645                if (!aborted) {
 646                        atomic_dec(&bip->bli_refcount);
 647                        return;
 648                }
 649        }
 650
 651        trace_xfs_buf_item_unlock(bip);
 652
 653        /*
 654         * If the buf item isn't tracking any data, free it, otherwise drop the
 655         * reference we hold to it. If we are aborting the transaction, this may
 656         * be the only reference to the buf item, so we free it anyway
 657         * regardless of whether it is dirty or not. A dirty abort implies a
 658         * shutdown, anyway.
 659         */
 660        clean = 1;
 661        for (i = 0; i < bip->bli_format_count; i++) {
 662                if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
 663                             bip->bli_formats[i].blf_map_size)) {
 664                        clean = 0;
 665                        break;
 666                }
 667        }
 668        if (clean)
 669                xfs_buf_item_relse(bp);
 670        else if (aborted) {
 671                if (atomic_dec_and_test(&bip->bli_refcount)) {
 672                        ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
 673                        xfs_buf_item_relse(bp);
 674                }
 675        } else
 676                atomic_dec(&bip->bli_refcount);
 677
 678        if (!hold)
 679                xfs_buf_relse(bp);
 680}
 681
 682/*
 683 * This is called to find out where the oldest active copy of the
 684 * buf log item in the on disk log resides now that the last log
 685 * write of it completed at the given lsn.
 686 * We always re-log all the dirty data in a buffer, so usually the
 687 * latest copy in the on disk log is the only one that matters.  For
 688 * those cases we simply return the given lsn.
 689 *
 690 * The one exception to this is for buffers full of newly allocated
 691 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 692 * flag set, indicating that only the di_next_unlinked fields from the
 693 * inodes in the buffers will be replayed during recovery.  If the
 694 * original newly allocated inode images have not yet been flushed
 695 * when the buffer is so relogged, then we need to make sure that we
 696 * keep the old images in the 'active' portion of the log.  We do this
 697 * by returning the original lsn of that transaction here rather than
 698 * the current one.
 699 */
 700STATIC xfs_lsn_t
 701xfs_buf_item_committed(
 702        struct xfs_log_item     *lip,
 703        xfs_lsn_t               lsn)
 704{
 705        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 706
 707        trace_xfs_buf_item_committed(bip);
 708
 709        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
 710                return lip->li_lsn;
 711        return lsn;
 712}
 713
 714STATIC void
 715xfs_buf_item_committing(
 716        struct xfs_log_item     *lip,
 717        xfs_lsn_t               commit_lsn)
 718{
 719}
 720
 721/*
 722 * This is the ops vector shared by all buf log items.
 723 */
 724static const struct xfs_item_ops xfs_buf_item_ops = {
 725        .iop_size       = xfs_buf_item_size,
 726        .iop_format     = xfs_buf_item_format,
 727        .iop_pin        = xfs_buf_item_pin,
 728        .iop_unpin      = xfs_buf_item_unpin,
 729        .iop_unlock     = xfs_buf_item_unlock,
 730        .iop_committed  = xfs_buf_item_committed,
 731        .iop_push       = xfs_buf_item_push,
 732        .iop_committing = xfs_buf_item_committing
 733};
 734
 735STATIC int
 736xfs_buf_item_get_format(
 737        struct xfs_buf_log_item *bip,
 738        int                     count)
 739{
 740        ASSERT(bip->bli_formats == NULL);
 741        bip->bli_format_count = count;
 742
 743        if (count == 1) {
 744                bip->bli_formats = &bip->__bli_format;
 745                return 0;
 746        }
 747
 748        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
 749                                KM_SLEEP);
 750        if (!bip->bli_formats)
 751                return ENOMEM;
 752        return 0;
 753}
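/*
 * Sizing example: a contiguous buffer (count == 1) simply reuses the
 * __bli_format structure embedded in the bli, so no allocation is
 * needed; a discontiguous buffer built from, say, 3 extents gets a
 * zeroed 3-element array here, released again by
 * xfs_buf_item_free_format() below.
 */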
 754
 755STATIC void
 756xfs_buf_item_free_format(
 757        struct xfs_buf_log_item *bip)
 758{
 759        if (bip->bli_formats != &bip->__bli_format) {
 760                kmem_free(bip->bli_formats);
 761                bip->bli_formats = NULL;
 762        }
 763}
 764
 765/*
 766 * Allocate a new buf log item to go with the given buffer.
  767 * Set the buffer's b_fspriv field to point to the new
  768 * buf log item.  If there are other items attached to the
 769 * buffer (see xfs_buf_attach_iodone() below), then put the
 770 * buf log item at the front.
 771 */
 772void
 773xfs_buf_item_init(
 774        xfs_buf_t       *bp,
 775        xfs_mount_t     *mp)
 776{
 777        xfs_log_item_t          *lip = bp->b_fspriv;
 778        xfs_buf_log_item_t      *bip;
 779        int                     chunks;
 780        int                     map_size;
 781        int                     error;
 782        int                     i;
 783
 784        /*
 785         * Check to see if there is already a buf log item for
 786         * this buffer.  If there is, it is guaranteed to be
 787         * the first.  If we do already have one, there is
 788         * nothing to do here so return.
 789         */
 790        ASSERT(bp->b_target->bt_mount == mp);
 791        if (lip != NULL && lip->li_type == XFS_LI_BUF)
 792                return;
 793
 794        bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
 795        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
 796        bip->bli_buf = bp;
 797        xfs_buf_hold(bp);
 798
 799        /*
 800         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
 801         * can be divided into. Make sure not to truncate any pieces.
 802         * map_size is the size of the bitmap needed to describe the
 803         * chunks of the buffer.
 804         *
 805         * Discontiguous buffer support follows the layout of the underlying
 806         * buffer. This makes the implementation as simple as possible.
 807         */
 808        error = xfs_buf_item_get_format(bip, bp->b_map_count);
 809        ASSERT(error == 0);
 810
 811        for (i = 0; i < bip->bli_format_count; i++) {
 812                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
 813                                      XFS_BLF_CHUNK);
 814                map_size = DIV_ROUND_UP(chunks, NBWORD);
 815
 816                bip->bli_formats[i].blf_type = XFS_LI_BUF;
 817                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
 818                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
 819                bip->bli_formats[i].blf_map_size = map_size;
 820        }
 821
 822#ifdef XFS_TRANS_DEBUG
 823        /*
 824         * Allocate the arrays for tracking what needs to be logged
 825         * and what our callers request to be logged.  bli_orig
 826         * holds a copy of the original, clean buffer for comparison
 827         * against, and bli_logged keeps a 1 bit flag per byte in
 828         * the buffer to indicate which bytes the callers have asked
 829         * to have logged.
 830         */
 831        bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
 832        memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
 833        bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
 834#endif
 835
 836        /*
 837         * Put the buf item into the list of items attached to the
 838         * buffer at the front.
 839         */
 840        if (bp->b_fspriv)
 841                bip->bli_item.li_bio_list = bp->b_fspriv;
 842        bp->b_fspriv = bip;
 843}
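/*
 * Bitmap sizing example for the loop above: a 4096 byte mapping with
 * XFS_BLF_CHUNK = 128 divides into 32 chunks, and with NBWORD = 32
 * bits per word that needs map_size = DIV_ROUND_UP(32, 32) = 1
 * blf_data_map word for that segment.
 */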
 844
 845
 846/*
 847 * Mark bytes first through last inclusive as dirty in the buf
 848 * item's bitmap.
 849 */
 850void
 851xfs_buf_item_log_segment(
 852        struct xfs_buf_log_item *bip,
 853        uint                    first,
 854        uint                    last,
 855        uint                    *map)
 856{
 857        uint            first_bit;
 858        uint            last_bit;
 859        uint            bits_to_set;
 860        uint            bits_set;
 861        uint            word_num;
 862        uint            *wordp;
 863        uint            bit;
 864        uint            end_bit;
 865        uint            mask;
 866
 867        /*
 868         * Convert byte offsets to bit numbers.
 869         */
 870        first_bit = first >> XFS_BLF_SHIFT;
 871        last_bit = last >> XFS_BLF_SHIFT;
 872
 873        /*
 874         * Calculate the total number of bits to be set.
 875         */
 876        bits_to_set = last_bit - first_bit + 1;
 877
 878        /*
 879         * Get a pointer to the first word in the bitmap
 880         * to set a bit in.
 881         */
 882        word_num = first_bit >> BIT_TO_WORD_SHIFT;
 883        wordp = &map[word_num];
 884
 885        /*
 886         * Calculate the starting bit in the first word.
 887         */
 888        bit = first_bit & (uint)(NBWORD - 1);
 889
 890        /*
 891         * First set any bits in the first word of our range.
 892         * If it starts at bit 0 of the word, it will be
 893         * set below rather than here.  That is what the variable
 894         * bit tells us. The variable bits_set tracks the number
 895         * of bits that have been set so far.  End_bit is the number
 896         * of the last bit to be set in this word plus one.
 897         */
 898        if (bit) {
 899                end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
 900                mask = ((1 << (end_bit - bit)) - 1) << bit;
 901                *wordp |= mask;
 902                wordp++;
 903                bits_set = end_bit - bit;
 904        } else {
 905                bits_set = 0;
 906        }
 907
 908        /*
 909         * Now set bits a whole word at a time that are between
 910         * first_bit and last_bit.
 911         */
 912        while ((bits_to_set - bits_set) >= NBWORD) {
 913                *wordp |= 0xffffffff;
 914                bits_set += NBWORD;
 915                wordp++;
 916        }
 917
 918        /*
 919         * Finally, set any bits left to be set in one last partial word.
 920         */
 921        end_bit = bits_to_set - bits_set;
 922        if (end_bit) {
 923                mask = (1 << end_bit) - 1;
 924                *wordp |= mask;
 925        }
 926
 927        xfs_buf_item_log_debug(bip, first, last);
 928}
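/*
 * Worked example: logging bytes 100 to 300 with XFS_BLF_SHIFT = 7
 * gives first_bit = 0 and last_bit = 2, so bits_to_set = 3.  The
 * range starts at bit 0 of word 0, so the leading partial word is
 * skipped, the whole-word loop never runs, and the final step sets
 * mask = (1 << 3) - 1 = 0x7 in the first bitmap word.
 */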
 929
 930/*
 931 * Mark bytes first through last inclusive as dirty in the buf
 932 * item's bitmap.
 933 */
 934void
 935xfs_buf_item_log(
 936        xfs_buf_log_item_t      *bip,
 937        uint                    first,
 938        uint                    last)
 939{
 940        int                     i;
 941        uint                    start;
 942        uint                    end;
 943        struct xfs_buf          *bp = bip->bli_buf;
 944
 945        /*
 946         * Mark the item as having some dirty data for
 947         * quick reference in xfs_buf_item_dirty.
 948         */
 949        bip->bli_flags |= XFS_BLI_DIRTY;
 950
 951        /*
 952         * walk each buffer segment and mark them dirty appropriately.
 953         */
 954        start = 0;
 955        for (i = 0; i < bip->bli_format_count; i++) {
 956                if (start > last)
 957                        break;
 958                end = start + BBTOB(bp->b_maps[i].bm_len);
 959                if (first > end) {
 960                        start += BBTOB(bp->b_maps[i].bm_len);
 961                        continue;
 962                }
 963                if (first < start)
 964                        first = start;
 965                if (end > last)
 966                        end = last;
 967
 968                xfs_buf_item_log_segment(bip, first, end,
 969                                         &bip->bli_formats[i].blf_data_map[0]);
 970
  971                start += BBTOB(bp->b_maps[i].bm_len);
 972        }
 973}
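/*
 * Clamping example (bm_len is in basic blocks): with two 4096 byte
 * maps, logging bytes 4000 to 4200 marks the tail of the first map,
 * then on the next iteration the head of the second, calling
 * xfs_buf_item_log_segment() once for each map the byte range
 * overlaps.
 */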
 974
 975
 976/*
  977 * Return non-zero if the buffer has some data that has been logged (at
  978 * any point, not just the current transaction) and zero if not.
 979 */
 980uint
 981xfs_buf_item_dirty(
 982        xfs_buf_log_item_t      *bip)
 983{
 984        return (bip->bli_flags & XFS_BLI_DIRTY);
 985}
 986
 987STATIC void
 988xfs_buf_item_free(
 989        xfs_buf_log_item_t      *bip)
 990{
 991#ifdef XFS_TRANS_DEBUG
 992        kmem_free(bip->bli_orig);
 993        kmem_free(bip->bli_logged);
 994#endif /* XFS_TRANS_DEBUG */
 995
 996        xfs_buf_item_free_format(bip);
 997        kmem_zone_free(xfs_buf_item_zone, bip);
 998}
 999
1000/*
1001 * This is called when the buf log item is no longer needed.  It should
1002 * free the buf log item associated with the given buffer and clear
1003 * the buffer's pointer to the buf log item.  If there are no more
1004 * items in the list, clear the b_iodone field of the buffer (see
1005 * xfs_buf_attach_iodone() below).
1006 */
1007void
1008xfs_buf_item_relse(
1009        xfs_buf_t       *bp)
1010{
1011        xfs_buf_log_item_t      *bip;
1012
1013        trace_xfs_buf_item_relse(bp, _RET_IP_);
1014
1015        bip = bp->b_fspriv;
1016        bp->b_fspriv = bip->bli_item.li_bio_list;
1017        if (bp->b_fspriv == NULL)
1018                bp->b_iodone = NULL;
1019
1020        xfs_buf_rele(bp);
1021        xfs_buf_item_free(bip);
1022}
1023
1024
1025/*
1026 * Add the given log item with its callback to the list of callbacks
1027 * to be called when the buffer's I/O completes.  If it is not set
1028 * already, set the buffer's b_iodone() routine to be
1029 * xfs_buf_iodone_callbacks() and link the log item into the list of
1030 * items rooted at b_fsprivate.  Items are always added as the second
1031 * entry in the list if there is a first, because the buf item code
1032 * assumes that the buf log item is first.
1033 */
1034void
1035xfs_buf_attach_iodone(
1036        xfs_buf_t       *bp,
1037        void            (*cb)(xfs_buf_t *, xfs_log_item_t *),
1038        xfs_log_item_t  *lip)
1039{
1040        xfs_log_item_t  *head_lip;
1041
1042        ASSERT(xfs_buf_islocked(bp));
1043
1044        lip->li_cb = cb;
1045        head_lip = bp->b_fspriv;
1046        if (head_lip) {
1047                lip->li_bio_list = head_lip->li_bio_list;
1048                head_lip->li_bio_list = lip;
1049        } else {
1050                bp->b_fspriv = lip;
1051        }
1052
1053        ASSERT(bp->b_iodone == NULL ||
1054               bp->b_iodone == xfs_buf_iodone_callbacks);
1055        bp->b_iodone = xfs_buf_iodone_callbacks;
1056}
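/*
 * Illustrative caller: inode flushing attaches its completion handler
 * this way before the buffer is submitted, e.g.:
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 *
 * so that xfs_buf_iodone_callbacks() invokes xfs_iflush_done() for
 * that inode log item once the I/O completes.
 */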
1057
1058/*
1059 * We can have many callbacks on a buffer. Running the callbacks individually
1060 * can cause a lot of contention on the AIL lock, so we allow for a single
1061 * callback to be able to scan the remaining lip->li_bio_list for other items
1062 * of the same type and callback to be processed in the first call.
1063 *
1064 * As a result, the loop walking the callback list below will also modify the
 1065 * list. It removes the first item from the list and then runs the callback.
1066 * The loop then restarts from the new head of the list. This allows the
1067 * callback to scan and modify the list attached to the buffer and we don't
1068 * have to care about maintaining a next item pointer.
1069 */
1070STATIC void
1071xfs_buf_do_callbacks(
1072        struct xfs_buf          *bp)
1073{
1074        struct xfs_log_item     *lip;
1075
1076        while ((lip = bp->b_fspriv) != NULL) {
1077                bp->b_fspriv = lip->li_bio_list;
1078                ASSERT(lip->li_cb != NULL);
1079                /*
1080                 * Clear the next pointer so we don't have any
1081                 * confusion if the item is added to another buf.
1082                 * Don't touch the log item after calling its
1083                 * callback, because it could have freed itself.
1084                 */
1085                lip->li_bio_list = NULL;
1086                lip->li_cb(bp, lip);
1087        }
1088}
1089
1090/*
1091 * This is the iodone() function for buffers which have had callbacks
1092 * attached to them by xfs_buf_attach_iodone().  It should remove each
1093 * log item from the buffer's list and call the callback of each in turn.
 1094 * When done, the buffer's b_fspriv field is set to NULL and the buffer
 1095 * is unlocked with a call to xfs_buf_ioend().
1096 */
1097void
1098xfs_buf_iodone_callbacks(
1099        struct xfs_buf          *bp)
1100{
1101        struct xfs_log_item     *lip = bp->b_fspriv;
1102        struct xfs_mount        *mp = lip->li_mountp;
1103        static ulong            lasttime;
1104        static xfs_buftarg_t    *lasttarg;
1105
1106        if (likely(!xfs_buf_geterror(bp)))
1107                goto do_callbacks;
1108
1109        /*
1110         * If we've already decided to shutdown the filesystem because of
1111         * I/O errors, there's no point in giving this a retry.
1112         */
1113        if (XFS_FORCED_SHUTDOWN(mp)) {
1114                xfs_buf_stale(bp);
1115                XFS_BUF_DONE(bp);
1116                trace_xfs_buf_item_iodone(bp, _RET_IP_);
1117                goto do_callbacks;
1118        }
1119
1120        if (bp->b_target != lasttarg ||
1121            time_after(jiffies, (lasttime + 5*HZ))) {
1122                lasttime = jiffies;
1123                xfs_buf_ioerror_alert(bp, __func__);
1124        }
1125        lasttarg = bp->b_target;
1126
1127        /*
1128         * If the write was asynchronous then no one will be looking for the
1129         * error.  Clear the error state and write the buffer out again.
1130         *
1131         * XXX: This helps against transient write errors, but we need to find
1132         * a way to shut the filesystem down if the writes keep failing.
1133         *
 1134         * In practice we'll shut the filesystem down soon, as non-transient
 1135         * errors tend to affect the whole device and a failing log write
1136         * will make us give up.  But we really ought to do better here.
1137         */
1138        if (XFS_BUF_ISASYNC(bp)) {
1139                ASSERT(bp->b_iodone != NULL);
1140
1141                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
1142
1143                xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1144
1145                if (!XFS_BUF_ISSTALE(bp)) {
1146                        bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
1147                        xfs_buf_iorequest(bp);
1148                } else {
1149                        xfs_buf_relse(bp);
1150                }
1151
1152                return;
1153        }
1154
1155        /*
1156         * If the write of the buffer was synchronous, we want to make
1157         * sure to return the error to the caller of xfs_bwrite().
1158         */
1159        xfs_buf_stale(bp);
1160        XFS_BUF_DONE(bp);
1161
1162        trace_xfs_buf_error_relse(bp, _RET_IP_);
1163
1164do_callbacks:
1165        xfs_buf_do_callbacks(bp);
1166        bp->b_fspriv = NULL;
1167        bp->b_iodone = NULL;
1168        xfs_buf_ioend(bp, 0);
1169}
1170
1171/*
1172 * This is the iodone() function for buffers which have been
1173 * logged.  It is called when they are eventually flushed out.
1174 * It should remove the buf item from the AIL, and free the buf item.
1175 * It is called by xfs_buf_iodone_callbacks() above which will take
1176 * care of cleaning up the buffer itself.
1177 */
1178void
1179xfs_buf_iodone(
1180        struct xfs_buf          *bp,
1181        struct xfs_log_item     *lip)
1182{
1183        struct xfs_ail          *ailp = lip->li_ailp;
1184
1185        ASSERT(BUF_ITEM(lip)->bli_buf == bp);
1186
1187        xfs_buf_rele(bp);
1188
1189        /*
1190         * If we are forcibly shutting down, this may well be
1191         * off the AIL already. That's because we simulate the
1192         * log-committed callbacks to unpin these buffers. Or we may never
 1193         * have put this item on the AIL because the transaction was
1194         * aborted forcibly. xfs_trans_ail_delete() takes care of these.
1195         *
1196         * Either way, AIL is useless if we're forcing a shutdown.
1197         */
1198        spin_lock(&ailp->xa_lock);
1199        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1200        xfs_buf_item_free(BUF_ITEM(lip));
1201}
1202