linux/fs/xfs/xfs_trans_buf.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps)
{
        struct xfs_log_item_desc *lidp;
        struct xfs_buf_log_item *blip;
        int                     len = 0;
        int                     i;

        for (i = 0; i < nmaps; i++)
                len += map[i].bm_len;

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                blip = (struct xfs_buf_log_item *)lidp->lid_item;
                if (blip->bli_item.li_type == XFS_LI_BUF &&
                    blip->bli_buf->b_target == target &&
                    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
                    blip->bli_buf->b_length == len) {
                        ASSERT(blip->bli_buf->b_map_count == nmaps);
                        return blip->bli_buf;
                }
        }

        return NULL;
}
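
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might describe a discontiguous buffer for the match above.  Block
 * numbers and lengths here are assumed purely for illustration.
 *
 *      struct xfs_buf_map map[2] = {
 *              { .bm_bn = daddr1, .bm_len = 4 },
 *              { .bm_bn = daddr2, .bm_len = 4 },
 *      };
 *      bp = xfs_trans_buf_item_match(tp, target, map, 2);
 *
 * A cached buffer matches only if it belongs to the same target,
 * starts at map[0].bm_bn, and its b_length equals the summed bm_len
 * (8 basic blocks here); b_map_count is then asserted to equal nmaps.
 */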
  68
/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        int                     reset_recur)
{
        struct xfs_buf_log_item *bip;

        ASSERT(bp->b_transp == NULL);

        /*
         * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
         * it doesn't have one yet, then allocate one and initialize it.
         * The checks to see if one is there are in xfs_buf_item_init().
         */
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_fspriv;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;

        /*
         * Take a reference for this transaction on the buf item.
         */
        atomic_inc(&bip->bli_refcount);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &bip->bli_item);

        /*
         * Initialize b_transp so we can find the buffer with
         * xfs_trans_buf_item_match() in xfs_trans_get_buf() and
         * friends below.
         */
        bp->b_transp = tp;
}

void
xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        _xfs_trans_bjoin(tp, bp, 0);
        trace_xfs_trans_bjoin(bp->b_fspriv);
}
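
/*
 * Illustrative sketch (not from the original source): joining a buffer
 * that the caller already holds locked, obtained outside any
 * transaction, into a transaction.  All names are assumed.
 *
 *      bp = xfs_buf_get_map(target, map, nmaps, 0);    (locked, no trans)
 *      ...
 *      xfs_trans_bjoin(tp, bp);        (bp->b_transp == tp from here on)
 */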

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        if (!tp)
                return xfs_buf_get_map(target, map, nmaps, flags);

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
                        XFS_BUF_DONE(bp);
                }

                ASSERT(bp->b_transp == tp);
                bip = bp->b_fspriv;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
                return bp;
        }

        bp = xfs_buf_get_map(target, map, nmaps, flags);
        if (bp == NULL) {
                return NULL;
        }

        ASSERT(!bp->b_error);

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_fspriv);
        return bp;
}
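
/*
 * Illustrative sketch (not part of the original source): the common
 * single-extent case.  In this kernel the xfs_trans_get_buf() wrapper
 * builds a one-entry map much like this; blkno/numblks are assumed.
 *
 *      struct xfs_buf_map map = { .bm_bn = blkno, .bm_len = numblks };
 *
 *      bp = xfs_trans_get_buf_map(tp, target, &map, 1, 0);
 *      if (bp == NULL)
 *              ... allocation failed, back out ...
 *
 * A second call with the same map inside the same transaction just
 * bumps bli_recur and returns the already-locked buffer.
 */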

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to
 * in the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t     *tp,
                struct xfs_mount *mp,
                int             flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        /*
         * Default to just trying to lock the superblock buffer
         * if tp is NULL.
         */
        if (tp == NULL) {
                return xfs_getsb(mp, flags);
        }

        /*
         * If the superblock buffer already has this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = mp->m_sb_bp;
        if (bp->b_transp == tp) {
                bip = bp->b_fspriv;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_getsb_recur(bip);
                return bp;
        }

        bp = xfs_getsb(mp, flags);
        if (bp == NULL)
                return NULL;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_getsb(bp->b_fspriv);
        return bp;
}
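
/*
 * Illustrative sketch (not from the original source): the typical
 * superblock-modification pattern, in the style of
 * xfs_trans_apply_sb_deltas().  Byte offsets are assumed.
 *
 *      bp = xfs_trans_getsb(tp, mp, 0);
 *      ... update fields via XFS_BUF_TO_SBP(bp) ...
 *      xfs_trans_log_buf(tp, bp, first_byte, last_byte);
 *
 * Because mp->m_sb_bp is private to the mount, recursion is detected
 * by comparing bp->b_transp with tp rather than by a cache lookup.
 */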

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int     xfs_do_error;
int     xfs_req_num;
int     xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;
        int                     error;

        *bpp = NULL;
        if (!tp) {
                bp = xfs_buf_read_map(target, map, nmaps, flags);
                if (!bp)
                        return (flags & XBF_TRYLOCK) ?
                                        EAGAIN : XFS_ERROR(ENOMEM);

                if (bp->b_error) {
                        error = bp->b_error;
                        xfs_buf_ioerror_alert(bp, __func__);
                        XFS_BUF_UNDONE(bp);
                        xfs_buf_stale(bp);
                        xfs_buf_relse(bp);
                        return error;
                }
#ifdef DEBUG
                if (xfs_do_error) {
                        if (xfs_error_target == target) {
                                if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                        xfs_buf_relse(bp);
                                        xfs_debug(mp, "Returning error!");
                                        return XFS_ERROR(EIO);
                                }
                        }
                }
#endif
                if (XFS_FORCED_SHUTDOWN(mp))
                        goto shutdown_abort;
                *bpp = bp;
                return 0;
        }

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  If it is already read in we just increment
         * the lock recursion count and return the buffer to the caller.
         * If the buffer is not yet read in, then we read it in, increment
         * the lock recursion count, and return it to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_transp == tp);
                ASSERT(bp->b_fspriv != NULL);
                ASSERT(!bp->b_error);
                if (!(XFS_BUF_ISDONE(bp))) {
                        trace_xfs_trans_read_buf_io(bp, _RET_IP_);
                        ASSERT(!XFS_BUF_ISASYNC(bp));
                        XFS_BUF_READ(bp);
                        xfsbdstrat(tp->t_mountp, bp);
                        error = xfs_buf_iowait(bp);
                        if (error) {
                                xfs_buf_ioerror_alert(bp, __func__);
                                xfs_buf_relse(bp);
                                /*
                                 * We can gracefully recover from most read
                                 * errors. Ones we can't are those that happen
                                 * after the transaction's already dirty.
                                 */
                                if (tp->t_flags & XFS_TRANS_DIRTY)
                                        xfs_force_shutdown(tp->t_mountp,
                                                        SHUTDOWN_META_IO_ERROR);
                                return error;
                        }
                }
                /*
                 * We never locked this buf ourselves, so we shouldn't
                 * brelse it either. Just get out.
                 */
                if (XFS_FORCED_SHUTDOWN(mp)) {
                        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                        *bpp = NULL;
                        return XFS_ERROR(EIO);
                }

                bip = bp->b_fspriv;
                bip->bli_recur++;

                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                trace_xfs_trans_read_buf_recur(bip);
                *bpp = bp;
                return 0;
        }

        bp = xfs_buf_read_map(target, map, nmaps, flags);
        if (bp == NULL) {
                *bpp = NULL;
                return (flags & XBF_TRYLOCK) ?
                                        0 : XFS_ERROR(ENOMEM);
        }
        if (bp->b_error) {
                error = bp->b_error;
                xfs_buf_stale(bp);
                XFS_BUF_DONE(bp);
                xfs_buf_ioerror_alert(bp, __func__);
                if (tp->t_flags & XFS_TRANS_DIRTY)
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
                xfs_buf_relse(bp);
                return error;
        }
#ifdef DEBUG
        if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
                if (xfs_error_target == target) {
                        if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                xfs_force_shutdown(tp->t_mountp,
                                                   SHUTDOWN_META_IO_ERROR);
                                xfs_buf_relse(bp);
                                xfs_debug(mp, "Returning trans error!");
                                return XFS_ERROR(EIO);
                        }
                }
        }
#endif
        if (XFS_FORCED_SHUTDOWN(mp))
                goto shutdown_abort;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_read_buf(bp->b_fspriv);

        *bpp = bp;
        return 0;

shutdown_abort:
        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
        xfs_buf_relse(bp);
        *bpp = NULL;
        return XFS_ERROR(EIO);
}
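
/*
 * Illustrative sketch (not part of the original source): a typical
 * transactional metadata read with the error handling callers
 * generally need; blkno/numblks are assumed.
 *
 *      struct xfs_buf_map map = { .bm_bn = blkno, .bm_len = numblks };
 *
 *      error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp,
 *                                     &map, 1, 0, &bp);
 *      if (error)
 *              return error;           (*bpp is NULL on failure)
 *      ... use bp: it is locked, read in, and joined to tp ...
 */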

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines.  If the buffer has been
 * modified within this transaction, decrement the recursion count but
 * do not release the buffer, even if the count goes to 0.  If the
 * buffer has not been modified within the transaction, decrement the
 * recursion count and release the buffer if the count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t    *tp,
                 xfs_buf_t      *bp)
{
        xfs_buf_log_item_t      *bip;

        /*
         * Default to a normal brelse() call if the tp is NULL.
         */
        if (tp == NULL) {
                ASSERT(bp->b_transp == NULL);
                xfs_buf_relse(bp);
                return;
        }

        ASSERT(bp->b_transp == tp);
        bip = bp->b_fspriv;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_brelse(bip);

        /*
         * If the release is just for a recursive lock,
         * then decrement the count and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
                return;
        }

        /*
         * If the buffer is dirty within this transaction, we can't
         * release it until we commit.
         */
        if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
                return;

        /*
         * If the buffer has been invalidated, then we can't release
         * it until the transaction commits to disk unless it is re-dirtied
         * as part of this transaction.  This prevents us from pulling
         * the item from the AIL before we should.
         */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;

        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

        /*
         * Free up the log item descriptor tracking the released item.
         */
        xfs_trans_del_item(&bip->bli_item);

        /*
         * Clear the hold flag in the buf log item if it is set.
         * We wouldn't want the next user of the buffer to
         * get confused.
         */
        if (bip->bli_flags & XFS_BLI_HOLD) {
                bip->bli_flags &= ~XFS_BLI_HOLD;
        }

        /*
         * Drop our reference to the buf log item.
         */
        atomic_dec(&bip->bli_refcount);

        /*
         * If the buf item is not tracking data in the log, then
         * we must free it before releasing the buffer back to the
         * free pool.  Before releasing the buffer to the free pool,
         * clear the transaction pointer in b_transp to dissolve
         * its relation to this transaction.
         */
        if (!xfs_buf_item_dirty(bip)) {
/***
                ASSERT(bp->b_pincount == 0);
***/
                ASSERT(atomic_read(&bip->bli_refcount) == 0);
                ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
                ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
                xfs_buf_item_relse(bp);
        }

        bp->b_transp = NULL;
        xfs_buf_relse(bp);
}
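
/*
 * Illustrative sketch (not from the original source): the read-only
 * lookup pattern where xfs_trans_brelse() really does unlock.  If the
 * buffer was logged (XFS_LID_DIRTY) or invalidated, the release below
 * is a no-op until commit; names are assumed.
 *
 *      error = xfs_trans_read_buf(mp, tp, target, blkno, numblks, 0, &bp);
 *      if (error)
 *              return error;
 *      ... examine, but do not log, the buffer contents ...
 *      xfs_trans_brelse(tp, bp);       (unjoins and unlocks here)
 */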

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t     *tp,
                xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t     *tp,
                        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);

        bip->bli_flags &= ~XFS_BLI_HOLD;
        trace_xfs_trans_bhold_release(bip);
}
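
/*
 * Illustrative sketch (not part of the original source): the classic
 * hold-across-commit pattern used when rolling transactions while
 * keeping a buffer locked.  The second transaction is assumed.
 *
 *      xfs_trans_bhold(tp, bp);        (keep bp locked past commit)
 *      error = xfs_trans_commit(tp, 0);
 *      ...start a new transaction tp...
 *      xfs_trans_bjoin(tp, bp);        (rejoin the still-locked buffer)
 *
 * xfs_trans_bhold_release() covers the case where the hold turns out
 * to be unnecessary before the commit happens.
 */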

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t   *tp,
                  xfs_buf_t     *bp,
                  uint          first,
                  uint          last)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(first <= last && last < BBTOB(bp->b_length));
        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);

        /*
         * Mark the buffer as needing to be written out eventually,
         * and set its iodone function to remove the buffer's buf log
         * item from the AIL and free it when the buffer is flushed
         * to disk.  See xfs_buf_attach_iodone() for more details
         * on li_cb and xfs_buf_iodone_callbacks().
         * If we end up aborting this transaction, we trap this buffer
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
        XFS_BUF_DONE(bp);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
        bip->bli_item.li_cb = xfs_buf_iodone;

        trace_xfs_trans_log_buf(bip);

        /*
         * If we invalidated the buffer within this transaction, then
         * cancel the invalidation now that we're dirtying the buffer
         * again.  There are no races with the code in xfs_buf_item_unpin(),
         * because we have a reference to the buffer this entire time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
                bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }

        tp->t_flags |= XFS_TRANS_DIRTY;
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        bip->bli_flags |= XFS_BLI_LOGGED;
        xfs_buf_item_log(bip, first, last);
}
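
/*
 * Illustrative sketch (not from the original source): logging only the
 * bytes that changed.  Offsets are from the start of the buffer and
 * the range is inclusive; offset/new_val are assumed.
 *
 *      memcpy(bp->b_addr + offset, &new_val, sizeof(new_val));
 *      xfs_trans_log_buf(tp, bp, offset,
 *                        offset + sizeof(new_val) - 1);
 *
 * After this the transaction and item descriptor are both dirty, so
 * xfs_trans_brelse() will refuse to unlock the buffer before commit.
 */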

/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_binval(bip);

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * If the buffer is already invalidated, then
                 * just return.
                 */
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
        }

        xfs_buf_stale(bp);

        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
        bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
        bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
        memset((char *)(bip->bli_format.blf_data_map), 0,
              (bip->bli_format.blf_map_size * sizeof(uint)));
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        tp->t_flags |= XFS_TRANS_DIRTY;
}
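
/*
 * Illustrative sketch (not part of the original source): invalidating
 * the buffer over blocks this transaction frees, so stale contents are
 * never written back; names are assumed.
 *
 *      error = xfs_trans_read_buf(mp, tp, target, blkno, numblks, 0, &bp);
 *      if (error)
 *              return error;
 *      ... the blocks covered by bp are freed in this transaction ...
 *      xfs_trans_binval(tp, bp);       (stale + XFS_BLF_CANCEL logged)
 *
 * A later xfs_trans_log_buf() in the same transaction would cancel
 * the invalidation again, per the XFS_BLI_STALE handling above.
 */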

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know
 * what to do at recovery time.
 */
void
xfs_trans_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_BUF;
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any
 * replay of the inodes in the buffer needs to be prevented,
 * as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_STALE_INODE;
        bip->bli_item.li_cb = xfs_buf_iodone;
}
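
/*
 * Illustrative sketch (not from the original source): freeing an inode
 * cluster, in the style of xfs_ifree_cluster().  The cluster buffer is
 * marked as a stale inode buffer and then invalidated.
 *
 *      ...bp is the inode cluster buffer, locked and joined to tp...
 *      xfs_trans_stale_inode_buf(tp, bp);
 *      xfs_trans_binval(tp, bp);
 *
 * XFS_BLI_STALE_INODE makes unpin processing run xfs_buf_iodone so
 * the inode log items attached to the buffer are pulled from the AIL.
 */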

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}
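
/*
 * Illustrative sketch (not part of the original source): marking the
 * buffer used to initialize a freshly allocated inode chunk, roughly
 * as inode allocation does; blkno/numblks are assumed.
 *
 *      bp = xfs_trans_get_buf(tp, target, blkno, numblks, 0);
 *      ... zero and initialize the new on-disk inode cores in bp ...
 *      xfs_trans_inode_alloc_buf(tp, bp);
 *      xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */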

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp,
        uint            type)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(type == XFS_BLF_UDQUOT_BUF ||
               type == XFS_BLF_PDQUOT_BUF ||
               type == XFS_BLF_GDQUOT_BUF);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_format.blf_flags |= type;
}
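
/*
 * Illustrative sketch (not from the original source): tagging a user
 * dquot cluster buffer so recovery can skip replaying it if a
 * corresponding quotaoff intervened; names are assumed.
 *
 *      ...bp is a dquot cluster buffer, locked and joined to tp...
 *      xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 *      xfs_trans_log_buf(tp, bp, first, last);
 */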
 777