linux/fs/xfs/xfs_buf_item.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_mount.h"
  27#include "xfs_buf_item.h"
  28#include "xfs_trans_priv.h"
  29#include "xfs_error.h"
  30#include "xfs_trace.h"
  31
  32
  33kmem_zone_t     *xfs_buf_item_zone;
  34
  35static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
  36{
  37        return container_of(lip, struct xfs_buf_log_item, bli_item);
  38}
  39
  40
  41#ifdef XFS_TRANS_DEBUG
  42/*
  43 * This function uses an alternate strategy for tracking the bytes
  44 * that the user requests to be logged.  This can then be used
  45 * in conjunction with the bli_orig array in the buf log item to
  46 * catch bugs in our callers' code.
  47 *
  48 * We also double check the bits set in xfs_buf_item_log using a
  49 * simple algorithm to check that every byte is accounted for.
  50 */
  51STATIC void
  52xfs_buf_item_log_debug(
  53        xfs_buf_log_item_t      *bip,
  54        uint                    first,
  55        uint                    last)
  56{
  57        uint    x;
  58        uint    byte;
  59        uint    nbytes;
  60        uint    chunk_num;
  61        uint    word_num;
  62        uint    bit_num;
  63        uint    bit_set;
  64        uint    *wordp;
  65
  66        ASSERT(bip->bli_logged != NULL);
  67        byte = first;
  68        nbytes = last - first + 1;
  69        bfset(bip->bli_logged, first, nbytes);
  70        for (x = 0; x < nbytes; x++) {
  71                chunk_num = byte >> XFS_BLF_SHIFT;
  72                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
  73                bit_num = chunk_num & (NBWORD - 1);
  74                wordp = &(bip->bli_format.blf_data_map[word_num]);
  75                bit_set = *wordp & (1 << bit_num);
  76                ASSERT(bit_set);
  77                byte++;
  78        }
  79}
  80
  81/*
  82 * This function is called when we flush something into a buffer without
  83 * logging it.  This happens for things like inodes which are logged
  84 * separately from the buffer.
  85 */
  86void
  87xfs_buf_item_flush_log_debug(
  88        xfs_buf_t       *bp,
  89        uint            first,
  90        uint            last)
  91{
  92        xfs_buf_log_item_t      *bip = bp->b_fspriv;
  93        uint                    nbytes;
  94
  95        if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
  96                return;
  97
  98        ASSERT(bip->bli_logged != NULL);
  99        nbytes = last - first + 1;
 100        bfset(bip->bli_logged, first, nbytes);
 101}
 102
 103/*
 104 * This function is called to verify that our callers have logged
 105 * all the bytes that they changed.
 106 *
 107 * It does this by comparing the original copy of the buffer stored in
 108 * the buf log item's bli_orig array to the current copy of the buffer
 109 * and ensuring that all bytes which mismatch are set in the bli_logged
 110 * array of the buf log item.
 111 */
 112STATIC void
 113xfs_buf_item_log_check(
 114        xfs_buf_log_item_t      *bip)
 115{
 116        char            *orig;
 117        char            *buffer;
 118        int             x;
 119        xfs_buf_t       *bp;
 120
 121        ASSERT(bip->bli_orig != NULL);
 122        ASSERT(bip->bli_logged != NULL);
 123
 124        bp = bip->bli_buf;
 125        ASSERT(bp->b_length > 0);
 126        ASSERT(bp->b_addr != NULL);
 127        orig = bip->bli_orig;
 128        buffer = bp->b_addr;
 129        for (x = 0; x < BBTOB(bp->b_length); x++) {
 130                if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
  131                        xfs_emerg(bp->b_target->bt_mount,
  132                                "%s: bip %p buffer %p orig %p index %d",
 133                                __func__, bip, bp, orig, x);
 134                        ASSERT(0);
 135                }
 136        }
 137}
 138#else
 139#define         xfs_buf_item_log_debug(x,y,z)
 140#define         xfs_buf_item_log_check(x)
 141#endif
 142
 143STATIC void     xfs_buf_do_callbacks(struct xfs_buf *bp);
 144
 145/*
  146 * This returns the number of log iovecs needed to log the given
  147 * segment of the buf log item.
  148 *
  149 * It calculates this as 1 iovec for the segment's buf log format
  150 * structure and 1 for each stretch of non-contiguous chunks to be
  151 * logged.  Contiguous chunks are logged in a single iovec.
  152 *
  153 * If the segment has no dirty chunks, nothing is logged and 0 is returned.
 154 */
 155STATIC uint
 156xfs_buf_item_size_segment(
 157        struct xfs_buf_log_item *bip,
 158        struct xfs_buf_log_format *blfp)
 159{
 160        struct xfs_buf          *bp = bip->bli_buf;
 161        uint                    nvecs;
 162        int                     next_bit;
 163        int                     last_bit;
 164
 165        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
 166        if (last_bit == -1)
 167                return 0;
 168
 169        /*
 170         * initial count for a dirty buffer is 2 vectors - the format structure
 171         * and the first dirty region.
 172         */
 173        nvecs = 2;
 174
 175        while (last_bit != -1) {
 176                /*
 177                 * This takes the bit number to start looking from and
 178                 * returns the next set bit from there.  It returns -1
 179                 * if there are no more bits set or the start bit is
 180                 * beyond the end of the bitmap.
 181                 */
 182                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
 183                                        last_bit + 1);
 184                /*
 185                 * If we run out of bits, leave the loop,
 186                 * else if we find a new set of bits bump the number of vecs,
 187                 * else keep scanning the current set of bits.
 188                 */
 189                if (next_bit == -1) {
 190                        break;
 191                } else if (next_bit != last_bit + 1) {
 192                        last_bit = next_bit;
 193                        nvecs++;
 194                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
 195                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
 196                            XFS_BLF_CHUNK)) {
 197                        last_bit = next_bit;
 198                        nvecs++;
 199                } else {
 200                        last_bit++;
 201                }
 202        }
 203
 204        return nvecs;
 205}
 206
 207/*
 208 * This returns the number of log iovecs needed to log the given buf log item.
 209 *
 210 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 211 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 212 * in a single iovec.
 213 *
  214 * Discontiguous buffers need a format structure per region that is being
  215 * logged. This makes the changes in the buffer appear to log recovery as though
  216 * they came from separate buffers, just as would occur if multiple buffers
 217 * were used instead of a single discontiguous buffer. This enables
 218 * discontiguous buffers to be in-memory constructs, completely transparent to
 219 * what ends up on disk.
 220 *
 221 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 222 * format structures.
 223 */
 224STATIC uint
 225xfs_buf_item_size(
 226        struct xfs_log_item     *lip)
 227{
 228        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 229        uint                    nvecs;
 230        int                     i;
 231
 232        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 233        if (bip->bli_flags & XFS_BLI_STALE) {
 234                /*
 235                 * The buffer is stale, so all we need to log
 236                 * is the buf log format structure with the
 237                 * cancel flag in it.
 238                 */
 239                trace_xfs_buf_item_size_stale(bip);
 240                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
 241                return bip->bli_format_count;
 242        }
 243
 244        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
 245
 246        /*
 247         * the vector count is based on the number of buffer vectors we have
 248         * dirty bits in. This will only be greater than one when we have a
 249         * compound buffer with more than one segment dirty. Hence for compound
 250         * buffers we need to track which segment the dirty bits correspond to,
 251         * and when we move from one segment to the next increment the vector
 252         * count for the extra buf log format structure that will need to be
 253         * written.
 254         */
 255        nvecs = 0;
 256        for (i = 0; i < bip->bli_format_count; i++) {
 257                nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
 258        }
 259
 260        trace_xfs_buf_item_size(bip);
 261        return nvecs;
 262}
 263
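/*
 * Fill in the log iovecs for one segment of the buffer: the first iovec
 * holds the segment's buf log format structure and the rest point to the
 * contiguous runs of dirty chunks within that segment.
 */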
 264static struct xfs_log_iovec *
 265xfs_buf_item_format_segment(
 266        struct xfs_buf_log_item *bip,
 267        struct xfs_log_iovec    *vecp,
 268        uint                    offset,
 269        struct xfs_buf_log_format *blfp)
 270{
 271        struct xfs_buf  *bp = bip->bli_buf;
 272        uint            base_size;
 273        uint            nvecs;
 274        int             first_bit;
 275        int             last_bit;
 276        int             next_bit;
 277        uint            nbits;
 278        uint            buffer_offset;
 279
 280        /* copy the flags across from the base format item */
 281        blfp->blf_flags = bip->bli_format.blf_flags;
 282
 283        /*
 284         * Base size is the actual size of the ondisk structure - it reflects
 285         * the actual size of the dirty bitmap rather than the size of the in
 286         * memory structure.
 287         */
 288        base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
 289                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
 290        vecp->i_addr = blfp;
 291        vecp->i_len = base_size;
 292        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
 293        vecp++;
 294        nvecs = 1;
 295
 296        if (bip->bli_flags & XFS_BLI_STALE) {
 297                /*
 298                 * The buffer is stale, so all we need to log
 299                 * is the buf log format structure with the
 300                 * cancel flag in it.
 301                 */
 302                trace_xfs_buf_item_format_stale(bip);
 303                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
 304                blfp->blf_size = nvecs;
 305                return vecp;
 306        }
 307
 308        /*
 309         * Fill in an iovec for each set of contiguous chunks.
 310         */
 311        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
 312        ASSERT(first_bit != -1);
 313        last_bit = first_bit;
 314        nbits = 1;
 315        for (;;) {
 316                /*
 317                 * This takes the bit number to start looking from and
 318                 * returns the next set bit from there.  It returns -1
 319                 * if there are no more bits set or the start bit is
 320                 * beyond the end of the bitmap.
 321                 */
 322                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
 323                                        (uint)last_bit + 1);
 324                /*
 325                 * If we run out of bits fill in the last iovec and get
 326                 * out of the loop.
 327                 * Else if we start a new set of bits then fill in the
 328                 * iovec for the series we were looking at and start
 329                 * counting the bits in the new one.
 330                 * Else we're still in the same set of bits so just
 331                 * keep counting and scanning.
 332                 */
 333                if (next_bit == -1) {
 334                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 335                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 336                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 337                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
 338                        nvecs++;
 339                        break;
 340                } else if (next_bit != last_bit + 1) {
 341                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 342                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 343                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 344                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
 345                        nvecs++;
 346                        vecp++;
 347                        first_bit = next_bit;
 348                        last_bit = next_bit;
 349                        nbits = 1;
 350                } else if (xfs_buf_offset(bp, offset +
 351                                              (next_bit << XFS_BLF_SHIFT)) !=
 352                           (xfs_buf_offset(bp, offset +
 353                                               (last_bit << XFS_BLF_SHIFT)) +
 354                            XFS_BLF_CHUNK)) {
 355                        buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
 356                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 357                        vecp->i_len = nbits * XFS_BLF_CHUNK;
 358                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
  359                        /*
  360                         * You would think we need to bump nvecs here too,
  361                         * but we do not: this count is used by log recovery,
  362                         * and it gets confused by the boundary split here,
  363                         * so the nvecs++ is deliberately omitted.
  364                         */
 365                        vecp++;
 366                        first_bit = next_bit;
 367                        last_bit = next_bit;
 368                        nbits = 1;
 369                } else {
 370                        last_bit++;
 371                        nbits++;
 372                }
 373        }
  374        blfp->blf_size = nvecs;
 375        return vecp;
 376}
 377
 378/*
 379 * This is called to fill in the vector of log iovecs for the
 380 * given log buf item.  It fills the first entry with a buf log
 381 * format structure, and the rest point to contiguous chunks
 382 * within the buffer.
 383 */
 384STATIC void
 385xfs_buf_item_format(
 386        struct xfs_log_item     *lip,
 387        struct xfs_log_iovec    *vecp)
 388{
 389        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 390        struct xfs_buf          *bp = bip->bli_buf;
 391        uint                    offset = 0;
 392        int                     i;
 393
 394        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 395        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
 396               (bip->bli_flags & XFS_BLI_STALE));
 397
 398        /*
 399         * If it is an inode buffer, transfer the in-memory state to the
 400         * format flags and clear the in-memory state. We do not transfer
 401         * this state if the inode buffer allocation has not yet been committed
  402         * to the log as setting the XFS_BLF_INODE_BUF flag will prevent
 403         * correct replay of the inode allocation.
 404         */
 405        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
 406                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
 407                      xfs_log_item_in_current_chkpt(lip)))
 408                        bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
 409                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
 410        }
 411
 412        for (i = 0; i < bip->bli_format_count; i++) {
 413                vecp = xfs_buf_item_format_segment(bip, vecp, offset,
 414                                                &bip->bli_formats[i]);
  415                offset += BBTOB(bp->b_maps[i].bm_len);
 416        }
 417
 418        /*
 419         * Check to make sure everything is consistent.
 420         */
 421        trace_xfs_buf_item_format(bip);
 422        xfs_buf_item_log_check(bip);
 423}
 424
 425/*
 426 * This is called to pin the buffer associated with the buf log item in memory
 427 * so it cannot be written out.
 428 *
 429 * We also always take a reference to the buffer log item here so that the bli
 430 * is held while the item is pinned in memory. This means that we can
 431 * unconditionally drop the reference count a transaction holds when the
 432 * transaction is completed.
 433 */
 434STATIC void
 435xfs_buf_item_pin(
 436        struct xfs_log_item     *lip)
 437{
 438        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 439
 440        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 441        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
 442               (bip->bli_flags & XFS_BLI_STALE));
 443
 444        trace_xfs_buf_item_pin(bip);
 445
 446        atomic_inc(&bip->bli_refcount);
 447        atomic_inc(&bip->bli_buf->b_pin_count);
 448}
 449
 450/*
 451 * This is called to unpin the buffer associated with the buf log
 452 * item which was previously pinned with a call to xfs_buf_item_pin().
 453 *
 454 * Also drop the reference to the buf item for the current transaction.
 455 * If the XFS_BLI_STALE flag is set and we are the last reference,
 456 * then free up the buf log item and unlock the buffer.
 457 *
 458 * If the remove flag is set we are called from uncommit in the
 459 * forced-shutdown path.  If that is true and the reference count on
 460 * the log item is going to drop to zero we need to free the item's
 461 * descriptor in the transaction.
 462 */
 463STATIC void
 464xfs_buf_item_unpin(
 465        struct xfs_log_item     *lip,
 466        int                     remove)
 467{
 468        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 469        xfs_buf_t       *bp = bip->bli_buf;
 470        struct xfs_ail  *ailp = lip->li_ailp;
 471        int             stale = bip->bli_flags & XFS_BLI_STALE;
 472        int             freed;
 473
 474        ASSERT(bp->b_fspriv == bip);
 475        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 476
 477        trace_xfs_buf_item_unpin(bip);
 478
 479        freed = atomic_dec_and_test(&bip->bli_refcount);
 480
 481        if (atomic_dec_and_test(&bp->b_pin_count))
 482                wake_up_all(&bp->b_waiters);
 483
 484        if (freed && stale) {
 485                ASSERT(bip->bli_flags & XFS_BLI_STALE);
 486                ASSERT(xfs_buf_islocked(bp));
 487                ASSERT(XFS_BUF_ISSTALE(bp));
 488                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
 489
 490                trace_xfs_buf_item_unpin_stale(bip);
 491
 492                if (remove) {
 493                        /*
 494                         * If we are in a transaction context, we have to
 495                         * remove the log item from the transaction as we are
 496                         * about to release our reference to the buffer.  If we
 497                         * don't, the unlock that occurs later in
 498                         * xfs_trans_uncommit() will try to reference the
 499                         * buffer which we no longer have a hold on.
 500                         */
 501                        if (lip->li_desc)
 502                                xfs_trans_del_item(lip);
 503
 504                        /*
 505                         * Since the transaction no longer refers to the buffer,
 506                         * the buffer should no longer refer to the transaction.
 507                         */
 508                        bp->b_transp = NULL;
 509                }
 510
 511                /*
 512                 * If we get called here because of an IO error, we may
 513                 * or may not have the item on the AIL. xfs_trans_ail_delete()
 514                 * will take care of that situation.
 515                 * xfs_trans_ail_delete() drops the AIL lock.
 516                 */
 517                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
 518                        xfs_buf_do_callbacks(bp);
 519                        bp->b_fspriv = NULL;
 520                        bp->b_iodone = NULL;
 521                } else {
 522                        spin_lock(&ailp->xa_lock);
 523                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
 524                        xfs_buf_item_relse(bp);
 525                        ASSERT(bp->b_fspriv == NULL);
 526                }
 527                xfs_buf_relse(bp);
 528        } else if (freed && remove) {
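                /*
                 * The item is being aborted in the forced-shutdown path and
                 * this was its last reference: simulate a failed write
                 * completion so that waiters are woken and the buffer is
                 * released.
                 */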
 529                xfs_buf_lock(bp);
 530                xfs_buf_ioerror(bp, EIO);
 531                XFS_BUF_UNDONE(bp);
 532                xfs_buf_stale(bp);
 533                xfs_buf_ioend(bp, 0);
 534        }
 535}
 536
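/*
 * Called by the AIL to attempt to write back the buffer underlying this
 * log item.  Return XFS_ITEM_PINNED if the buffer is still pinned in
 * memory, XFS_ITEM_LOCKED if the buffer lock cannot be taken without
 * blocking, XFS_ITEM_FLUSHING if the buffer could not be added to the
 * delayed write list, and XFS_ITEM_SUCCESS once it has been queued.
 */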
 537STATIC uint
 538xfs_buf_item_push(
 539        struct xfs_log_item     *lip,
 540        struct list_head        *buffer_list)
 541{
 542        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 543        struct xfs_buf          *bp = bip->bli_buf;
 544        uint                    rval = XFS_ITEM_SUCCESS;
 545
 546        if (xfs_buf_ispinned(bp))
 547                return XFS_ITEM_PINNED;
 548        if (!xfs_buf_trylock(bp))
 549                return XFS_ITEM_LOCKED;
 550
 551        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
 552
 553        trace_xfs_buf_item_push(bip);
 554
 555        if (!xfs_buf_delwri_queue(bp, buffer_list))
 556                rval = XFS_ITEM_FLUSHING;
 557        xfs_buf_unlock(bp);
 558        return rval;
 559}
 560
 561/*
 562 * Release the buffer associated with the buf log item.  If there is no dirty
 563 * logged data associated with the buffer recorded in the buf log item, then
 564 * free the buf log item and remove the reference to it in the buffer.
 565 *
 566 * This call ignores the recursion count.  It is only called when the buffer
 567 * should REALLY be unlocked, regardless of the recursion count.
 568 *
 569 * We unconditionally drop the transaction's reference to the log item. If the
 570 * item was logged, then another reference was taken when it was pinned, so we
 571 * can safely drop the transaction reference now.  This also allows us to avoid
 572 * potential races with the unpin code freeing the bli by not referencing the
 573 * bli after we've dropped the reference count.
 574 *
 575 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 576 * if necessary but do not unlock the buffer.  This is for support of
 577 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 578 * free the item.
 579 */
 580STATIC void
 581xfs_buf_item_unlock(
 582        struct xfs_log_item     *lip)
 583{
 584        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 585        struct xfs_buf          *bp = bip->bli_buf;
 586        int                     aborted;
 587        uint                    hold;
 588
 589        /* Clear the buffer's association with this transaction. */
 590        bp->b_transp = NULL;
 591
 592        /*
 593         * If this is a transaction abort, don't return early.  Instead, allow
 594         * the brelse to happen.  Normally it would be done for stale
 595         * (cancelled) buffers at unpin time, but we'll never go through the
 596         * pin/unpin cycle if we abort inside commit.
 597         */
 598        aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
 599
 600        /*
 601         * Before possibly freeing the buf item, determine if we should
 602         * release the buffer at the end of this routine.
 603         */
 604        hold = bip->bli_flags & XFS_BLI_HOLD;
 605
 606        /* Clear the per transaction state. */
 607        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);
 608
 609        /*
 610         * If the buf item is marked stale, then don't do anything.  We'll
 611         * unlock the buffer and free the buf item when the buffer is unpinned
 612         * for the last time.
 613         */
 614        if (bip->bli_flags & XFS_BLI_STALE) {
 615                trace_xfs_buf_item_unlock_stale(bip);
 616                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
 617                if (!aborted) {
 618                        atomic_dec(&bip->bli_refcount);
 619                        return;
 620                }
 621        }
 622
 623        trace_xfs_buf_item_unlock(bip);
 624
 625        /*
 626         * If the buf item isn't tracking any data, free it, otherwise drop the
 627         * reference we hold to it.
 628         */
 629        if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
 630                             bip->bli_format.blf_map_size))
 631                xfs_buf_item_relse(bp);
 632        else
 633                atomic_dec(&bip->bli_refcount);
 634
 635        if (!hold)
 636                xfs_buf_relse(bp);
 637}
 638
 639/*
 640 * This is called to find out where the oldest active copy of the
 641 * buf log item in the on disk log resides now that the last log
 642 * write of it completed at the given lsn.
 643 * We always re-log all the dirty data in a buffer, so usually the
 644 * latest copy in the on disk log is the only one that matters.  For
 645 * those cases we simply return the given lsn.
 646 *
 647 * The one exception to this is for buffers full of newly allocated
 648 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 649 * flag set, indicating that only the di_next_unlinked fields from the
 650 * inodes in the buffers will be replayed during recovery.  If the
 651 * original newly allocated inode images have not yet been flushed
 652 * when the buffer is so relogged, then we need to make sure that we
 653 * keep the old images in the 'active' portion of the log.  We do this
 654 * by returning the original lsn of that transaction here rather than
 655 * the current one.
 656 */
 657STATIC xfs_lsn_t
 658xfs_buf_item_committed(
 659        struct xfs_log_item     *lip,
 660        xfs_lsn_t               lsn)
 661{
 662        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 663
 664        trace_xfs_buf_item_committed(bip);
 665
 666        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
 667                return lip->li_lsn;
 668        return lsn;
 669}
 670
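/*
 * Buffer log items have nothing to record when the transaction that logged
 * them is committed to the in-core log, so ->iop_committing is a no-op.
 */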
 671STATIC void
 672xfs_buf_item_committing(
 673        struct xfs_log_item     *lip,
 674        xfs_lsn_t               commit_lsn)
 675{
 676}
 677
 678/*
 679 * This is the ops vector shared by all buf log items.
 680 */
 681static const struct xfs_item_ops xfs_buf_item_ops = {
 682        .iop_size       = xfs_buf_item_size,
 683        .iop_format     = xfs_buf_item_format,
 684        .iop_pin        = xfs_buf_item_pin,
 685        .iop_unpin      = xfs_buf_item_unpin,
 686        .iop_unlock     = xfs_buf_item_unlock,
 687        .iop_committed  = xfs_buf_item_committed,
 688        .iop_push       = xfs_buf_item_push,
 689        .iop_committing = xfs_buf_item_committing
 690};
 691
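/*
 * Allocate the buf log format structures that describe each segment of the
 * buffer.  A single segment buffer reuses the format structure embedded in
 * the buf log item itself; multi-segment (discontiguous) buffers get an
 * array with one format structure per segment.
 */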
 692STATIC int
 693xfs_buf_item_get_format(
 694        struct xfs_buf_log_item *bip,
 695        int                     count)
 696{
 697        ASSERT(bip->bli_formats == NULL);
 698        bip->bli_format_count = count;
 699
 700        if (count == 1) {
 701                bip->bli_formats = &bip->bli_format;
 702                return 0;
 703        }
 704
 705        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
 706                                KM_SLEEP);
 707        if (!bip->bli_formats)
 708                return ENOMEM;
 709        return 0;
 710}
 711
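/*
 * Free a separately allocated segment format array.  The single segment
 * case points at the format structure embedded in the buf log item and
 * must not be freed here.
 */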
 712STATIC void
 713xfs_buf_item_free_format(
 714        struct xfs_buf_log_item *bip)
 715{
 716        if (bip->bli_formats != &bip->bli_format) {
 717                kmem_free(bip->bli_formats);
 718                bip->bli_formats = NULL;
 719        }
 720}
 721
 722/*
 723 * Allocate a new buf log item to go with the given buffer.
  724 * Set the buffer's b_fspriv field to point to the new
  725 * buf log item.  If there are other items attached to the
 726 * buffer (see xfs_buf_attach_iodone() below), then put the
 727 * buf log item at the front.
 728 */
 729void
 730xfs_buf_item_init(
 731        xfs_buf_t       *bp,
 732        xfs_mount_t     *mp)
 733{
 734        xfs_log_item_t          *lip = bp->b_fspriv;
 735        xfs_buf_log_item_t      *bip;
 736        int                     chunks;
 737        int                     map_size;
 738        int                     error;
 739        int                     i;
 740
 741        /*
 742         * Check to see if there is already a buf log item for
 743         * this buffer.  If there is, it is guaranteed to be
 744         * the first.  If we do already have one, there is
 745         * nothing to do here so return.
 746         */
 747        ASSERT(bp->b_target->bt_mount == mp);
 748        if (lip != NULL && lip->li_type == XFS_LI_BUF)
 749                return;
 750
 751        bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
 752        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
 753        bip->bli_buf = bp;
 754        xfs_buf_hold(bp);
 755
 756        /*
 757         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
 758         * can be divided into. Make sure not to truncate any pieces.
 759         * map_size is the size of the bitmap needed to describe the
 760         * chunks of the buffer.
 761         *
 762         * Discontiguous buffer support follows the layout of the underlying
 763         * buffer. This makes the implementation as simple as possible.
 764         */
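        /*
         * For example, a 4096 byte segment is made up of 32 chunks of
         * XFS_BLF_CHUNK (128) bytes, which are described by a single
         * 32-bit bitmap word.
         */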
 765        error = xfs_buf_item_get_format(bip, bp->b_map_count);
 766        ASSERT(error == 0);
 767
 768        for (i = 0; i < bip->bli_format_count; i++) {
 769                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
 770                                      XFS_BLF_CHUNK);
 771                map_size = DIV_ROUND_UP(chunks, NBWORD);
 772
 773                bip->bli_formats[i].blf_type = XFS_LI_BUF;
 774                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
 775                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
 776                bip->bli_formats[i].blf_map_size = map_size;
 777        }
 778
 779#ifdef XFS_TRANS_DEBUG
 780        /*
 781         * Allocate the arrays for tracking what needs to be logged
 782         * and what our callers request to be logged.  bli_orig
 783         * holds a copy of the original, clean buffer for comparison
 784         * against, and bli_logged keeps a 1 bit flag per byte in
 785         * the buffer to indicate which bytes the callers have asked
 786         * to have logged.
 787         */
 788        bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
 789        memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
 790        bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
 791#endif
 792
 793        /*
 794         * Put the buf item into the list of items attached to the
 795         * buffer at the front.
 796         */
 797        if (bp->b_fspriv)
 798                bip->bli_item.li_bio_list = bp->b_fspriv;
 799        bp->b_fspriv = bip;
 800}
 801
 802
 803/*
  804 * Mark bytes first through last inclusive as dirty in the given
  805 * segment's bitmap.
 806 */
 807void
 808xfs_buf_item_log_segment(
 809        struct xfs_buf_log_item *bip,
 810        uint                    first,
 811        uint                    last,
 812        uint                    *map)
 813{
 814        uint            first_bit;
 815        uint            last_bit;
 816        uint            bits_to_set;
 817        uint            bits_set;
 818        uint            word_num;
 819        uint            *wordp;
 820        uint            bit;
 821        uint            end_bit;
 822        uint            mask;
 823
 824        /*
 825         * Convert byte offsets to bit numbers.
 826         */
 827        first_bit = first >> XFS_BLF_SHIFT;
 828        last_bit = last >> XFS_BLF_SHIFT;
 829
 830        /*
 831         * Calculate the total number of bits to be set.
 832         */
 833        bits_to_set = last_bit - first_bit + 1;
 834
 835        /*
 836         * Get a pointer to the first word in the bitmap
 837         * to set a bit in.
 838         */
 839        word_num = first_bit >> BIT_TO_WORD_SHIFT;
 840        wordp = &map[word_num];
 841
 842        /*
 843         * Calculate the starting bit in the first word.
 844         */
 845        bit = first_bit & (uint)(NBWORD - 1);
 846
 847        /*
 848         * First set any bits in the first word of our range.
 849         * If it starts at bit 0 of the word, it will be
 850         * set below rather than here.  That is what the variable
 851         * bit tells us. The variable bits_set tracks the number
 852         * of bits that have been set so far.  End_bit is the number
 853         * of the last bit to be set in this word plus one.
 854         */
 855        if (bit) {
 856                end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
 857                mask = ((1 << (end_bit - bit)) - 1) << bit;
 858                *wordp |= mask;
 859                wordp++;
 860                bits_set = end_bit - bit;
 861        } else {
 862                bits_set = 0;
 863        }
 864
 865        /*
 866         * Now set bits a whole word at a time that are between
 867         * first_bit and last_bit.
 868         */
 869        while ((bits_to_set - bits_set) >= NBWORD) {
 870                *wordp |= 0xffffffff;
 871                bits_set += NBWORD;
 872                wordp++;
 873        }
 874
 875        /*
 876         * Finally, set any bits left to be set in one last partial word.
 877         */
 878        end_bit = bits_to_set - bits_set;
 879        if (end_bit) {
 880                mask = (1 << end_bit) - 1;
 881                *wordp |= mask;
 882        }
 883
 884        xfs_buf_item_log_debug(bip, first, last);
 885}
 886
 887/*
 888 * Mark bytes first through last inclusive as dirty in the buf
 889 * item's bitmap.
 890 */
 891void
 892xfs_buf_item_log(
 893        xfs_buf_log_item_t      *bip,
 894        uint                    first,
 895        uint                    last)
 896{
 897        int                     i;
 898        uint                    start;
 899        uint                    end;
 900        struct xfs_buf          *bp = bip->bli_buf;
 901
 902        /*
 903         * Mark the item as having some dirty data for
 904         * quick reference in xfs_buf_item_dirty.
 905         */
 906        bip->bli_flags |= XFS_BLI_DIRTY;
 907
 908        /*
  909         * Walk each buffer segment and mark it dirty appropriately.
 910         */
 911        start = 0;
 912        for (i = 0; i < bip->bli_format_count; i++) {
 913                if (start > last)
 914                        break;
 915                end = start + BBTOB(bp->b_maps[i].bm_len);
 916                if (first > end) {
 917                        start += BBTOB(bp->b_maps[i].bm_len);
 918                        continue;
 919                }
 920                if (first < start)
 921                        first = start;
 922                if (end > last)
 923                        end = last;
 924
 925                xfs_buf_item_log_segment(bip, first, end,
 926                                         &bip->bli_formats[i].blf_data_map[0]);
 927
  928                start += BBTOB(bp->b_maps[i].bm_len);
 929        }
 930}
 931
 932
 933/*
 934 * Return 1 if the buffer has some data that has been logged (at any
 935 * point, not just the current transaction) and 0 if not.
 936 */
 937uint
 938xfs_buf_item_dirty(
 939        xfs_buf_log_item_t      *bip)
 940{
 941        return (bip->bli_flags & XFS_BLI_DIRTY);
 942}
 943
 944STATIC void
 945xfs_buf_item_free(
 946        xfs_buf_log_item_t      *bip)
 947{
 948#ifdef XFS_TRANS_DEBUG
 949        kmem_free(bip->bli_orig);
 950        kmem_free(bip->bli_logged);
 951#endif /* XFS_TRANS_DEBUG */
 952
 953        xfs_buf_item_free_format(bip);
 954        kmem_zone_free(xfs_buf_item_zone, bip);
 955}
 956
 957/*
 958 * This is called when the buf log item is no longer needed.  It should
 959 * free the buf log item associated with the given buffer and clear
 960 * the buffer's pointer to the buf log item.  If there are no more
 961 * items in the list, clear the b_iodone field of the buffer (see
 962 * xfs_buf_attach_iodone() below).
 963 */
 964void
 965xfs_buf_item_relse(
 966        xfs_buf_t       *bp)
 967{
 968        xfs_buf_log_item_t      *bip;
 969
 970        trace_xfs_buf_item_relse(bp, _RET_IP_);
 971
 972        bip = bp->b_fspriv;
 973        bp->b_fspriv = bip->bli_item.li_bio_list;
 974        if (bp->b_fspriv == NULL)
 975                bp->b_iodone = NULL;
 976
 977        xfs_buf_rele(bp);
 978        xfs_buf_item_free(bip);
 979}
 980
 981
 982/*
 983 * Add the given log item with its callback to the list of callbacks
 984 * to be called when the buffer's I/O completes.  If it is not set
 985 * already, set the buffer's b_iodone() routine to be
 986 * xfs_buf_iodone_callbacks() and link the log item into the list of
  987 * items rooted at b_fspriv.  Items are always added as the second
 988 * entry in the list if there is a first, because the buf item code
 989 * assumes that the buf log item is first.
 990 */
 991void
 992xfs_buf_attach_iodone(
 993        xfs_buf_t       *bp,
 994        void            (*cb)(xfs_buf_t *, xfs_log_item_t *),
 995        xfs_log_item_t  *lip)
 996{
 997        xfs_log_item_t  *head_lip;
 998
 999        ASSERT(xfs_buf_islocked(bp));
1000
1001        lip->li_cb = cb;
1002        head_lip = bp->b_fspriv;
1003        if (head_lip) {
1004                lip->li_bio_list = head_lip->li_bio_list;
1005                head_lip->li_bio_list = lip;
1006        } else {
1007                bp->b_fspriv = lip;
1008        }
1009
1010        ASSERT(bp->b_iodone == NULL ||
1011               bp->b_iodone == xfs_buf_iodone_callbacks);
1012        bp->b_iodone = xfs_buf_iodone_callbacks;
1013}
1014
1015/*
1016 * We can have many callbacks on a buffer. Running the callbacks individually
 1017 * can cause a lot of contention on the AIL lock, so we allow a single
 1018 * callback to scan the remaining lip->li_bio_list for other items with the
 1019 * same type and callback function and process them in the first call.
1020 *
1021 * As a result, the loop walking the callback list below will also modify the
 1022 * list.  It removes the first item from the list and then runs the callback.
1023 * The loop then restarts from the new head of the list. This allows the
1024 * callback to scan and modify the list attached to the buffer and we don't
1025 * have to care about maintaining a next item pointer.
1026 */
1027STATIC void
1028xfs_buf_do_callbacks(
1029        struct xfs_buf          *bp)
1030{
1031        struct xfs_log_item     *lip;
1032
1033        while ((lip = bp->b_fspriv) != NULL) {
1034                bp->b_fspriv = lip->li_bio_list;
1035                ASSERT(lip->li_cb != NULL);
1036                /*
1037                 * Clear the next pointer so we don't have any
1038                 * confusion if the item is added to another buf.
1039                 * Don't touch the log item after calling its
1040                 * callback, because it could have freed itself.
1041                 */
1042                lip->li_bio_list = NULL;
1043                lip->li_cb(bp, lip);
1044        }
1045}
1046
1047/*
1048 * This is the iodone() function for buffers which have had callbacks
1049 * attached to them by xfs_buf_attach_iodone().  It should remove each
1050 * log item from the buffer's list and call the callback of each in turn.
1051 * When done, the buffer's fsprivate field is set to NULL and the buffer
1052 * is unlocked with a call to iodone().
1053 */
1054void
1055xfs_buf_iodone_callbacks(
1056        struct xfs_buf          *bp)
1057{
1058        struct xfs_log_item     *lip = bp->b_fspriv;
1059        struct xfs_mount        *mp = lip->li_mountp;
1060        static ulong            lasttime;
1061        static xfs_buftarg_t    *lasttarg;
1062
1063        if (likely(!xfs_buf_geterror(bp)))
1064                goto do_callbacks;
1065
1066        /*
1067         * If we've already decided to shutdown the filesystem because of
1068         * I/O errors, there's no point in giving this a retry.
1069         */
1070        if (XFS_FORCED_SHUTDOWN(mp)) {
1071                xfs_buf_stale(bp);
1072                XFS_BUF_DONE(bp);
1073                trace_xfs_buf_item_iodone(bp, _RET_IP_);
1074                goto do_callbacks;
1075        }
1076
1077        if (bp->b_target != lasttarg ||
1078            time_after(jiffies, (lasttime + 5*HZ))) {
1079                lasttime = jiffies;
1080                xfs_buf_ioerror_alert(bp, __func__);
1081        }
1082        lasttarg = bp->b_target;
1083
1084        /*
1085         * If the write was asynchronous then no one will be looking for the
1086         * error.  Clear the error state and write the buffer out again.
1087         *
1088         * XXX: This helps against transient write errors, but we need to find
1089         * a way to shut the filesystem down if the writes keep failing.
1090         *
 1091 * In practice we'll shut the filesystem down soon, as non-transient
 1092 * errors tend to affect the whole device and a failing log write
1093         * will make us give up.  But we really ought to do better here.
1094         */
1095        if (XFS_BUF_ISASYNC(bp)) {
1096                ASSERT(bp->b_iodone != NULL);
1097
1098                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
1099
1100                xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1101
1102                if (!XFS_BUF_ISSTALE(bp)) {
1103                        bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
1104                        xfs_buf_iorequest(bp);
1105                } else {
1106                        xfs_buf_relse(bp);
1107                }
1108
1109                return;
1110        }
1111
1112        /*
1113         * If the write of the buffer was synchronous, we want to make
1114         * sure to return the error to the caller of xfs_bwrite().
1115         */
1116        xfs_buf_stale(bp);
1117        XFS_BUF_DONE(bp);
1118
1119        trace_xfs_buf_error_relse(bp, _RET_IP_);
1120
1121do_callbacks:
1122        xfs_buf_do_callbacks(bp);
1123        bp->b_fspriv = NULL;
1124        bp->b_iodone = NULL;
1125        xfs_buf_ioend(bp, 0);
1126}
1127
1128/*
1129 * This is the iodone() function for buffers which have been
1130 * logged.  It is called when they are eventually flushed out.
1131 * It should remove the buf item from the AIL, and free the buf item.
1132 * It is called by xfs_buf_iodone_callbacks() above which will take
1133 * care of cleaning up the buffer itself.
1134 */
1135void
1136xfs_buf_iodone(
1137        struct xfs_buf          *bp,
1138        struct xfs_log_item     *lip)
1139{
1140        struct xfs_ail          *ailp = lip->li_ailp;
1141
1142        ASSERT(BUF_ITEM(lip)->bli_buf == bp);
1143
1144        xfs_buf_rele(bp);
1145
1146        /*
1147         * If we are forcibly shutting down, this may well be
1148         * off the AIL already. That's because we simulate the
1149         * log-committed callbacks to unpin these buffers. Or we may never
 1150 * have put this item on the AIL because the transaction was
1151         * aborted forcibly. xfs_trans_ail_delete() takes care of these.
1152         *
1153         * Either way, AIL is useless if we're forcing a shutdown.
1154         */
1155        spin_lock(&ailp->xa_lock);
1156        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1157        xfs_buf_item_free(BUF_ITEM(lip));
1158}
1159