linux/fs/xfs/xfs_trans_buf.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps)
{
        struct xfs_log_item_desc *lidp;
        struct xfs_buf_log_item *blip;
        int                     len = 0;
        int                     i;

        for (i = 0; i < nmaps; i++)
                len += map[i].bm_len;

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                blip = (struct xfs_buf_log_item *)lidp->lid_item;
                if (blip->bli_item.li_type == XFS_LI_BUF &&
                    blip->bli_buf->b_target == target &&
                    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
                    blip->bli_buf->b_length == len) {
                        ASSERT(blip->bli_buf->b_map_count == nmaps);
                        return blip->bli_buf;
                }
        }

        return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        int                     reset_recur)
{
        struct xfs_buf_log_item *bip;

        ASSERT(bp->b_transp == NULL);

        /*
         * The xfs_buf_log_item pointer is stored in b_fspriv.  If
         * it doesn't have one yet, then allocate one and initialize it.
         * The checks to see if one is there are in xfs_buf_item_init().
         */
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_fspriv;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;

        /*
         * Take a reference for this transaction on the buf item.
         */
        atomic_inc(&bip->bli_refcount);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &bip->bli_item);

        /*
         * Set b_transp so we can find the buffer with
         * xfs_trans_buf_item_match() in xfs_trans_get_buf() and
         * friends below.
         */
        bp->b_transp = tp;
}

void
xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        _xfs_trans_bjoin(tp, bp, 0);
        trace_xfs_trans_bjoin(bp->b_fspriv);
}
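
/*
 * Example (hypothetical caller sketch, not part of this file): a buffer
 * obtained directly from the buffer cache, e.g. via xfs_buf_get(), can
 * be handed over to a transaction with xfs_trans_bjoin().  The buffer
 * must be locked and not already owned by another transaction:
 *
 *      bp = xfs_buf_get(mp->m_ddev_targp, blkno, numblks, 0);
 *      if (bp)
 *              xfs_trans_bjoin(tp, bp);
 */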

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        if (!tp)
                return xfs_buf_get_map(target, map, nmaps, flags);

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
                        XFS_BUF_DONE(bp);
                }

                ASSERT(bp->b_transp == tp);
                bip = bp->b_fspriv;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
                return bp;
        }

        bp = xfs_buf_get_map(target, map, nmaps, flags);
        if (bp == NULL)
                return NULL;

        ASSERT(!bp->b_error);

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_fspriv);
        return bp;
}
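
/*
 * Example (hypothetical caller sketch): most callers use the
 * single-mapping wrapper xfs_trans_get_buf() from xfs_trans.h, which
 * builds a one-element xfs_buf_map and calls the function above:
 *
 *      bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *      if (bp == NULL)
 *              goto out_cancel;
 */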

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to in
 * the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t     *tp,
                struct xfs_mount *mp,
                int             flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        /*
         * Default to just trying to lock the superblock buffer
         * if tp is NULL.
         */
        if (tp == NULL)
                return xfs_getsb(mp, flags);

        /*
         * If the superblock buffer already has this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = mp->m_sb_bp;
        if (bp->b_transp == tp) {
                bip = bp->b_fspriv;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_getsb_recur(bip);
                return bp;
        }

        bp = xfs_getsb(mp, flags);
        if (bp == NULL)
                return NULL;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_getsb(bp->b_fspriv);
        return bp;
}
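
/*
 * Example (hypothetical sketch): a transaction that changes global
 * superblock fields typically locks the superblock buffer with
 * xfs_trans_getsb() and then logs the modified fields, e.g. through
 * a helper such as xfs_mod_sb():
 *
 *      bp = xfs_trans_getsb(tp, mp, 0);
 *      ... update the in-core superblock ...
 *      xfs_mod_sb(tp, XFS_SB_ICOUNT);
 */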

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int     xfs_do_error;
int     xfs_req_num;
int     xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;
        int                     error;

        *bpp = NULL;
        if (!tp) {
                bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
                if (!bp)
                        return (flags & XBF_TRYLOCK) ?
                                        EAGAIN : XFS_ERROR(ENOMEM);

                if (bp->b_error) {
                        error = bp->b_error;
                        xfs_buf_ioerror_alert(bp, __func__);
                        XFS_BUF_UNDONE(bp);
                        xfs_buf_stale(bp);
                        xfs_buf_relse(bp);
                        return error;
                }
#ifdef DEBUG
                if (xfs_do_error) {
                        if (xfs_error_target == target) {
                                if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                        xfs_buf_relse(bp);
                                        xfs_debug(mp, "Returning error!");
                                        return XFS_ERROR(EIO);
                                }
                        }
                }
#endif
                if (XFS_FORCED_SHUTDOWN(mp))
                        goto shutdown_abort;
                *bpp = bp;
                return 0;
        }

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_transp field, then we know we already
         * have it locked.  If it is already read in we just increment
         * the lock recursion count and return the buffer to the caller.
         * If the buffer is not yet read in, then we read it in, increment
         * the lock recursion count, and return it to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_transp == tp);
                ASSERT(bp->b_fspriv != NULL);
                ASSERT(!bp->b_error);
                if (!(XFS_BUF_ISDONE(bp))) {
                        trace_xfs_trans_read_buf_io(bp, _RET_IP_);
                        ASSERT(!XFS_BUF_ISASYNC(bp));
                        ASSERT(bp->b_iodone == NULL);
                        XFS_BUF_READ(bp);
                        bp->b_ops = ops;
                        xfsbdstrat(tp->t_mountp, bp);
                        error = xfs_buf_iowait(bp);
                        if (error) {
                                xfs_buf_ioerror_alert(bp, __func__);
                                xfs_buf_relse(bp);
                                /*
                                 * We can gracefully recover from most read
                                 * errors. Ones we can't are those that happen
                                 * after the transaction's already dirty.
                                 */
                                if (tp->t_flags & XFS_TRANS_DIRTY)
                                        xfs_force_shutdown(tp->t_mountp,
                                                        SHUTDOWN_META_IO_ERROR);
                                return error;
                        }
                }
                /*
                 * We never locked this buf ourselves, so we shouldn't
                 * brelse it either. Just get out.
                 */
                if (XFS_FORCED_SHUTDOWN(mp)) {
                        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                        *bpp = NULL;
                        return XFS_ERROR(EIO);
                }

                bip = bp->b_fspriv;
                bip->bli_recur++;

                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                trace_xfs_trans_read_buf_recur(bip);
                *bpp = bp;
                return 0;
        }

        bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
        if (bp == NULL) {
                *bpp = NULL;
                return (flags & XBF_TRYLOCK) ?
                                        0 : XFS_ERROR(ENOMEM);
        }
        if (bp->b_error) {
                error = bp->b_error;
                xfs_buf_stale(bp);
                XFS_BUF_DONE(bp);
                xfs_buf_ioerror_alert(bp, __func__);
                if (tp->t_flags & XFS_TRANS_DIRTY)
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
                xfs_buf_relse(bp);
                return error;
        }
#ifdef DEBUG
        if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
                if (xfs_error_target == target) {
                        if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                xfs_force_shutdown(tp->t_mountp,
                                                   SHUTDOWN_META_IO_ERROR);
                                xfs_buf_relse(bp);
                                xfs_debug(mp, "Returning trans error!");
                                return XFS_ERROR(EIO);
                        }
                }
        }
#endif
        if (XFS_FORCED_SHUTDOWN(mp))
                goto shutdown_abort;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_read_buf(bp->b_fspriv);

        *bpp = bp;
        return 0;

shutdown_abort:
        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
        xfs_buf_relse(bp);
        *bpp = NULL;
        return XFS_ERROR(EIO);
}
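
/*
 * Example (hypothetical caller sketch): the usual entry point is the
 * single-mapping wrapper xfs_trans_read_buf() from xfs_trans.h.  A
 * typical read-modify-log sequence would look like:
 *
 *      error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *                                 numblks, 0, &bp, ops);
 *      if (error)
 *              return error;
 *      ... modify the buffer contents ...
 *      xfs_trans_log_buf(tp, bp, first, last);
 */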

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0.  If the buffer is
 * not modified within the transaction, decrement the recursion count
 * and release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t    *tp,
                 xfs_buf_t      *bp)
{
        xfs_buf_log_item_t      *bip;

        /*
         * Default to a normal brelse() call if the tp is NULL.
         */
        if (tp == NULL) {
                ASSERT(bp->b_transp == NULL);
                xfs_buf_relse(bp);
                return;
        }

        ASSERT(bp->b_transp == tp);
        bip = bp->b_fspriv;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_brelse(bip);

        /*
         * If the release is just for a recursive lock,
         * then decrement the count and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
                return;
        }

        /*
         * If the buffer is dirty within this transaction, we can't
         * release it until we commit.
         */
        if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
                return;

        /*
         * If the buffer has been invalidated, then we can't release
         * it until the transaction commits to disk unless it is re-dirtied
         * as part of this transaction.  This prevents us from pulling
         * the item from the AIL before we should.
         */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;

        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

        /*
         * Free up the log item descriptor tracking the released item.
         */
        xfs_trans_del_item(&bip->bli_item);

        /*
         * Clear the hold flag in the buf log item if it is set.
         * We wouldn't want the next user of the buffer to
         * get confused.
         */
        if (bip->bli_flags & XFS_BLI_HOLD)
                bip->bli_flags &= ~XFS_BLI_HOLD;

        /*
         * Drop our reference to the buf log item.
         */
        atomic_dec(&bip->bli_refcount);

        /*
         * If the buf item is not tracking data in the log, then
         * we must free it before releasing the buffer back to the
         * free pool.  Before releasing the buffer to the free pool,
         * clear the transaction pointer in b_transp to dissolve
         * its relation to this transaction.
         */
        if (!xfs_buf_item_dirty(bip)) {
/***
                ASSERT(bp->b_pincount == 0);
***/
                ASSERT(atomic_read(&bip->bli_refcount) == 0);
                ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
                ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
                xfs_buf_item_relse(bp);
        }

        bp->b_transp = NULL;
        xfs_buf_relse(bp);
}
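
/*
 * Example (hypothetical sketch): a buffer read only for examination can
 * be released before the transaction commits.  Since it was never
 * dirtied, xfs_trans_brelse() unlocks it and detaches it from the
 * transaction immediately:
 *
 *      error = xfs_trans_read_buf(mp, tp, target, blkno, numblks, 0,
 *                                 &bp, ops);
 *      if (error)
 *              return error;
 *      ... examine the buffer, decide not to modify it ...
 *      xfs_trans_brelse(tp, bp);
 */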

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t     *tp,
                xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t     *tp,
                        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);

        bip->bli_flags &= ~XFS_BLI_HOLD;
        trace_xfs_trans_bhold_release(bip);
}
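
/*
 * Example (hypothetical sketch): the hold/release pair is what lets a
 * buffer survive a transaction roll.  The caller holds the buffer so
 * the commit does not unlock it, then joins it to the follow-up
 * transaction (xfs_trans_roll() is assumed here):
 *
 *      xfs_trans_bhold(tp, bp);
 *      error = xfs_trans_roll(&tp, ip);
 *      if (error)
 *              return error;
 *      xfs_trans_bjoin(tp, bp);
 */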

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * buffer's disk address.
 */
void
xfs_trans_log_buf(xfs_trans_t   *tp,
                  xfs_buf_t     *bp,
                  uint          first,
                  uint          last)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(first <= last && last < BBTOB(bp->b_length));
        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);

        /*
         * Mark the buffer as needing to be written out eventually,
         * and set its iodone function to remove the buffer's buf log
         * item from the AIL and free it when the buffer is flushed
         * to disk.  See xfs_buf_attach_iodone() for more details
         * on li_cb and xfs_buf_iodone_callbacks().
         * If we end up aborting this transaction, we trap this buffer
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
        XFS_BUF_DONE(bp);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
        bip->bli_item.li_cb = xfs_buf_iodone;

        trace_xfs_trans_log_buf(bip);

        /*
         * If we invalidated the buffer within this transaction, then
         * cancel the invalidation now that we're dirtying the buffer
         * again.  There are no races with the code in xfs_buf_item_unpin(),
         * because we have a reference to the buffer this entire time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
                bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }

        tp->t_flags |= XFS_TRANS_DIRTY;
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        bip->bli_flags |= XFS_BLI_LOGGED;
        xfs_buf_item_log(bip, first, last);
}
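
/*
 * Example (hypothetical sketch): to log an entire buffer, pass the full
 * byte range.  first/last are zero-based byte offsets, and last must be
 * strictly less than the buffer length in bytes:
 *
 *      xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */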

/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;
        int                     i;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_binval(bip);

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * If the buffer is already invalidated, then
                 * just return.
                 */
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
        }

        xfs_buf_stale(bp);

        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
        bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
        bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
        for (i = 0; i < bip->bli_format_count; i++) {
                memset(bip->bli_formats[i].blf_data_map, 0,
                       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
        }
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        tp->t_flags |= XFS_TRANS_DIRTY;
}
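
/*
 * Example (hypothetical sketch): when metadata blocks are freed, the
 * covering buffer is invalidated so it is never written back over the
 * now-free (and possibly reallocated) blocks:
 *
 *      bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *      if (bp)
 *              xfs_trans_binval(tp, bp);
 */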

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know
 * what to do at recovery time.
 */
void
xfs_trans_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_BUF;
}

/*
 * This call is used to indicate that the buffer is going to be staled and
 * was an inode buffer.  This means it gets special processing during unpin:
 * any inodes associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay of the
 * inodes in the buffer needs to be prevented, as the buffer may have been
 * reused.
 */
void
xfs_trans_stale_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_STALE_INODE;
        bip->bli_item.li_cb = xfs_buf_iodone;
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag.)
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp,
        uint            type)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(type == XFS_BLF_UDQUOT_BUF ||
               type == XFS_BLF_PDQUOT_BUF ||
               type == XFS_BLF_GDQUOT_BUF);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->__bli_format.blf_flags |= type;
}