linux/fs/xfs/xfs_inode_item.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t     *xfs_ili_zone;          /* inode log item zone */

static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_inode_log_item, ili_item);
}


/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC uint
xfs_inode_item_size(
        struct xfs_log_item     *lip)
{
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
        uint                    nvecs = 2;

        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                if ((iip->ili_fields & XFS_ILOG_DEXT) &&
                    ip->i_d.di_nextents > 0 &&
                    ip->i_df.if_bytes > 0)
                        nvecs++;
                break;

        case XFS_DINODE_FMT_BTREE:
                if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
                    ip->i_df.if_broot_bytes > 0)
                        nvecs++;
                break;

        case XFS_DINODE_FMT_LOCAL:
                if ((iip->ili_fields & XFS_ILOG_DDATA) &&
                    ip->i_df.if_bytes > 0)
                        nvecs++;
                break;

        case XFS_DINODE_FMT_DEV:
        case XFS_DINODE_FMT_UUID:
                break;

        default:
                ASSERT(0);
                break;
        }

        if (!XFS_IFORK_Q(ip))
                return nvecs;


        /*
         * Log any necessary attribute data.
         */
        switch (ip->i_d.di_aformat) {
        case XFS_DINODE_FMT_EXTENTS:
                if ((iip->ili_fields & XFS_ILOG_AEXT) &&
                    ip->i_d.di_anextents > 0 &&
                    ip->i_afp->if_bytes > 0)
                        nvecs++;
                break;

        case XFS_DINODE_FMT_BTREE:
                if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
                    ip->i_afp->if_broot_bytes > 0)
                        nvecs++;
                break;

        case XFS_DINODE_FMT_LOCAL:
                if ((iip->ili_fields & XFS_ILOG_ADATA) &&
                    ip->i_afp->if_bytes > 0)
                        nvecs++;
                break;

        default:
                ASSERT(0);
                break;
        }

        return nvecs;
}

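/*
 * Worked example for the size calculation above: logging an extent format
 * data fork (XFS_ILOG_DEXT set, extents present) on an inode with no
 * attribute fork yields nvecs == 3 (log format + inode core + data extent
 * records).  Adding a local format attribute fork with XFS_ILOG_ADATA set
 * would bump that to 4, which is the maximum for a single inode item.
 */
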
/*
 * xfs_inode_item_format_extents - convert in-core extents to on-disk form
 *
 * For either the data or attr fork in extent format, we need to endian convert
 * the in-core extents as we place them into the on-disk inode. In this case, we
 * need to do this conversion before we write the extents into the log. Because
 * we don't have the disk inode to write into here, we allocate a buffer and
 * format the extents into it via xfs_iextents_copy(). We free the buffer in
 * the unlock routine after the copy for the log has been made.
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only log on-disk extents
 * here, so always use the physical fork size to determine the size of the
 * buffer we need to allocate.
 */
STATIC void
xfs_inode_item_format_extents(
        struct xfs_inode        *ip,
        struct xfs_log_iovec    *vecp,
        int                     whichfork,
        int                     type)
{
        xfs_bmbt_rec_t          *ext_buffer;

        ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
        if (whichfork == XFS_DATA_FORK)
                ip->i_itemp->ili_extents_buf = ext_buffer;
        else
                ip->i_itemp->ili_aextents_buf = ext_buffer;

        vecp->i_addr = ext_buffer;
        vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
        vecp->i_type = type;
}

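/*
 * Note on the lifetime of the buffer allocated above: the pointer is stashed
 * in ili_extents_buf (data fork) or ili_aextents_buf (attr fork) and is not
 * freed here.  It has to live until the log has copied the iovec contents,
 * so it is released in xfs_inode_item_unlock() once the transaction is done
 * with the item.
 */
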
/*
 * This is called to fill in the vector of log iovecs for the
 * given inode log item.  It fills the first item with an inode
 * log format structure, the second with the on-disk inode structure,
 * and a possible third and/or fourth with the inode data/extents/b-tree
 * root and inode attributes data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_iovec    *vecp)
{
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
        uint                    nvecs;
        size_t                  data_bytes;
        xfs_mount_t             *mp;

        vecp->i_addr = &iip->ili_format;
        vecp->i_len  = sizeof(xfs_inode_log_format_t);
        vecp->i_type = XLOG_REG_TYPE_IFORMAT;
        vecp++;
        nvecs        = 1;

        vecp->i_addr = &ip->i_d;
        vecp->i_len  = sizeof(struct xfs_icdinode);
        vecp->i_type = XLOG_REG_TYPE_ICORE;
        vecp++;
        nvecs++;

        /*
         * If this is really an old format inode, then we need to
         * log it as such.  This means that we have to copy the link
         * count from the new field to the old.  We don't have to worry
         * about the new fields, because nothing trusts them as long as
         * the old inode version number is there.  If the superblock already
         * has a new version number, then we don't bother converting back.
         */
        mp = ip->i_mount;
        ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
        if (ip->i_d.di_version == 1) {
                if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
                        /*
                         * Convert it back.
                         */
                        ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
                        ip->i_d.di_onlink = ip->i_d.di_nlink;
                } else {
                        /*
                         * The superblock version has already been bumped,
                         * so just make the conversion to the new inode
                         * format permanent.
                         */
                        ip->i_d.di_version = 2;
                        ip->i_d.di_onlink = 0;
                        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
                }
        }

        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                iip->ili_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
                          XFS_ILOG_DEV | XFS_ILOG_UUID);

                if ((iip->ili_fields & XFS_ILOG_DEXT) &&
                    ip->i_d.di_nextents > 0 &&
                    ip->i_df.if_bytes > 0) {
                        ASSERT(ip->i_df.if_u1.if_extents != NULL);
                        ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
                        ASSERT(iip->ili_extents_buf == NULL);

#ifdef XFS_NATIVE_HOST
                        if (ip->i_d.di_nextents == ip->i_df.if_bytes /
                                                (uint)sizeof(xfs_bmbt_rec_t)) {
                                /*
                                 * There are no delayed allocation
                                 * extents, so just point to the
                                 * real extents array.
                                 */
                                vecp->i_addr = ip->i_df.if_u1.if_extents;
                                vecp->i_len = ip->i_df.if_bytes;
                                vecp->i_type = XLOG_REG_TYPE_IEXT;
                        } else
#endif
                        {
                                xfs_inode_item_format_extents(ip, vecp,
                                        XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
                        }
                        ASSERT(vecp->i_len <= ip->i_df.if_bytes);
                        iip->ili_format.ilf_dsize = vecp->i_len;
                        vecp++;
                        nvecs++;
                } else {
                        iip->ili_fields &= ~XFS_ILOG_DEXT;
                }
                break;

        case XFS_DINODE_FMT_BTREE:
                iip->ili_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
                          XFS_ILOG_DEV | XFS_ILOG_UUID);

                if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
                    ip->i_df.if_broot_bytes > 0) {
                        ASSERT(ip->i_df.if_broot != NULL);
                        vecp->i_addr = ip->i_df.if_broot;
                        vecp->i_len = ip->i_df.if_broot_bytes;
                        vecp->i_type = XLOG_REG_TYPE_IBROOT;
                        vecp++;
                        nvecs++;
                        iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
                } else {
                        ASSERT(!(iip->ili_fields &
                                 XFS_ILOG_DBROOT));
#ifdef XFS_TRANS_DEBUG
                        if (iip->ili_root_size > 0) {
                                ASSERT(iip->ili_root_size ==
                                       ip->i_df.if_broot_bytes);
                                ASSERT(memcmp(iip->ili_orig_root,
                                            ip->i_df.if_broot,
                                            iip->ili_root_size) == 0);
                        } else {
                                ASSERT(ip->i_df.if_broot_bytes == 0);
                        }
#endif
                        iip->ili_fields &= ~XFS_ILOG_DBROOT;
                }
                break;

        case XFS_DINODE_FMT_LOCAL:
                iip->ili_fields &=
                        ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT |
                          XFS_ILOG_DEV | XFS_ILOG_UUID);
                if ((iip->ili_fields & XFS_ILOG_DDATA) &&
                    ip->i_df.if_bytes > 0) {
                        ASSERT(ip->i_df.if_u1.if_data != NULL);
                        ASSERT(ip->i_d.di_size > 0);

                        vecp->i_addr = ip->i_df.if_u1.if_data;
                        /*
                         * Round if_bytes up to a word boundary.
                         * The underlying memory is guaranteed
                         * to be there by xfs_idata_realloc().
                         */
                        data_bytes = roundup(ip->i_df.if_bytes, 4);
                        ASSERT((ip->i_df.if_real_bytes == 0) ||
                               (ip->i_df.if_real_bytes == data_bytes));
                        vecp->i_len = (int)data_bytes;
                        vecp->i_type = XLOG_REG_TYPE_ILOCAL;
                        vecp++;
                        nvecs++;
                        iip->ili_format.ilf_dsize = (unsigned)data_bytes;
                } else {
                        iip->ili_fields &= ~XFS_ILOG_DDATA;
                }
                break;

        case XFS_DINODE_FMT_DEV:
                iip->ili_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
                          XFS_ILOG_DEXT | XFS_ILOG_UUID);
                if (iip->ili_fields & XFS_ILOG_DEV) {
                        iip->ili_format.ilf_u.ilfu_rdev =
                                ip->i_df.if_u2.if_rdev;
                }
                break;

        case XFS_DINODE_FMT_UUID:
                iip->ili_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
                          XFS_ILOG_DEXT | XFS_ILOG_DEV);
                if (iip->ili_fields & XFS_ILOG_UUID) {
                        iip->ili_format.ilf_u.ilfu_uuid =
                                ip->i_df.if_u2.if_uuid;
                }
                break;

        default:
                ASSERT(0);
                break;
        }

        /*
         * If there are no attributes associated with the file, then we're done.
         */
        if (!XFS_IFORK_Q(ip)) {
                iip->ili_fields &=
                        ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
                goto out;
        }

        switch (ip->i_d.di_aformat) {
        case XFS_DINODE_FMT_EXTENTS:
                iip->ili_fields &=
                        ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

                if ((iip->ili_fields & XFS_ILOG_AEXT) &&
                    ip->i_d.di_anextents > 0 &&
                    ip->i_afp->if_bytes > 0) {
                        ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
                                ip->i_d.di_anextents);
                        ASSERT(ip->i_afp->if_u1.if_extents != NULL);
#ifdef XFS_NATIVE_HOST
                        /*
                         * There are no delayed allocation extents
                         * for attributes, so just point at the array.
                         */
                        vecp->i_addr = ip->i_afp->if_u1.if_extents;
                        vecp->i_len = ip->i_afp->if_bytes;
                        vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
                        ASSERT(iip->ili_aextents_buf == NULL);
                        xfs_inode_item_format_extents(ip, vecp,
                                        XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
                        iip->ili_format.ilf_asize = vecp->i_len;
                        vecp++;
                        nvecs++;
                } else {
                        iip->ili_fields &= ~XFS_ILOG_AEXT;
                }
                break;

        case XFS_DINODE_FMT_BTREE:
                iip->ili_fields &=
                        ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

                if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
                    ip->i_afp->if_broot_bytes > 0) {
                        ASSERT(ip->i_afp->if_broot != NULL);

                        vecp->i_addr = ip->i_afp->if_broot;
                        vecp->i_len = ip->i_afp->if_broot_bytes;
                        vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
                        vecp++;
                        nvecs++;
                        iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
                } else {
                        iip->ili_fields &= ~XFS_ILOG_ABROOT;
                }
                break;

        case XFS_DINODE_FMT_LOCAL:
                iip->ili_fields &=
                        ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

                if ((iip->ili_fields & XFS_ILOG_ADATA) &&
                    ip->i_afp->if_bytes > 0) {
                        ASSERT(ip->i_afp->if_u1.if_data != NULL);

                        vecp->i_addr = ip->i_afp->if_u1.if_data;
                        /*
                         * Round if_bytes up to a word boundary.
                         * The underlying memory is guaranteed
                         * to be there by xfs_idata_realloc().
                         */
                        data_bytes = roundup(ip->i_afp->if_bytes, 4);
                        ASSERT((ip->i_afp->if_real_bytes == 0) ||
                               (ip->i_afp->if_real_bytes == data_bytes));
                        vecp->i_len = (int)data_bytes;
                        vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
                        vecp++;
                        nvecs++;
                        iip->ili_format.ilf_asize = (unsigned)data_bytes;
                } else {
                        iip->ili_fields &= ~XFS_ILOG_ADATA;
                }
                break;

        default:
                ASSERT(0);
                break;
        }

out:
        /*
         * Now update the log format that goes out to disk from the in-core
         * values.  We always write the inode core to make the arithmetic
         * games in recovery easier, which isn't a big deal as just about any
         * transaction would dirty it anyway.
         */
        iip->ili_format.ilf_fields = XFS_ILOG_CORE |
                (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
        iip->ili_format.ilf_size = nvecs;
}

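/*
 * For illustration, a fully populated vector array built above for an inode
 * with an extent format data fork and a local format attribute fork looks
 * like this (ilf_size == 4):
 *
 *      vecp[0]: XLOG_REG_TYPE_IFORMAT      iip->ili_format
 *      vecp[1]: XLOG_REG_TYPE_ICORE        ip->i_d (inode core)
 *      vecp[2]: XLOG_REG_TYPE_IEXT         on-disk format extent records
 *      vecp[3]: XLOG_REG_TYPE_IATTR_LOCAL  inline attribute fork data
 *
 * Forks that are clean or empty are skipped and their XFS_ILOG_* flags are
 * cleared, so ilf_fields always matches what was actually written.
 */
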

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_inode        *ip = INODE_ITEM(lip)->ili_inode;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        trace_xfs_inode_pin(ip, _RET_IP_);
        atomic_inc(&ip->i_pincount);
}


/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 */
STATIC void
xfs_inode_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_inode        *ip = INODE_ITEM(lip)->ili_inode;

        trace_xfs_inode_unpin(ip, _RET_IP_);
        ASSERT(atomic_read(&ip->i_pincount) > 0);
        if (atomic_dec_and_test(&ip->i_pincount))
                wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

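/*
 * Try to write the inode back from the AIL.  A pinned inode still has
 * changes that only exist in the in-memory log, so it cannot be flushed
 * until the final unpin (which also wakes xfs_iunpin_wait() via the
 * wake_up_bit() above); in that case, and when the inode is locked by
 * someone else or already under flush, the corresponding XFS_ITEM_* state
 * is reported back to the AIL push code.  Otherwise the inode is flushed
 * to its backing cluster buffer and that buffer is added to the caller's
 * delwri list.
 */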
STATIC uint
xfs_inode_item_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
        struct xfs_buf          *bp = NULL;
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;

        if (xfs_ipincount(ip) > 0)
                return XFS_ITEM_PINNED;

        if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
                return XFS_ITEM_LOCKED;

        /*
         * Re-check the pincount now that we stabilized the value by
         * taking the ilock.
         */
        if (xfs_ipincount(ip) > 0) {
                rval = XFS_ITEM_PINNED;
                goto out_unlock;
        }

        /*
         * Stale inode items should force out the iclog.
         */
        if (ip->i_flags & XFS_ISTALE) {
                rval = XFS_ITEM_PINNED;
                goto out_unlock;
        }

        /*
         * Someone else is already flushing the inode.  Nothing we can do
         * here but wait for the flush to finish and remove the item from
         * the AIL.
         */
        if (!xfs_iflock_nowait(ip)) {
                rval = XFS_ITEM_FLUSHING;
                goto out_unlock;
        }

        ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
        ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));

        spin_unlock(&lip->li_ailp->xa_lock);

        error = xfs_iflush(ip, &bp);
        if (!error) {
                if (!xfs_buf_delwri_queue(bp, buffer_list))
                        rval = XFS_ITEM_FLUSHING;
                xfs_buf_relse(bp);
        }

        spin_lock(&lip->li_ailp->xa_lock);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return rval;
}

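/*
 * Summary of the return values used by xfs_inode_item_push() above:
 * XFS_ITEM_PINNED means the inode still has changes in the in-memory log
 * (the AIL push code typically responds by forcing the log and retrying),
 * XFS_ITEM_LOCKED means another thread holds the inode lock,
 * XFS_ITEM_FLUSHING means a flush is already in progress (or the delwri
 * queue attempt failed) and IO completion will deal with the item, and
 * XFS_ITEM_SUCCESS means the inode was flushed and queued for write.
 */
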
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
        struct xfs_log_item     *lip)
{
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
        unsigned short          lock_flags;

        ASSERT(ip->i_itemp != NULL);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        /*
         * If the inode needed a separate buffer with which to log
         * its extents, then free it now.
         */
        if (iip->ili_extents_buf != NULL) {
                ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
                ASSERT(ip->i_d.di_nextents > 0);
                ASSERT(iip->ili_fields & XFS_ILOG_DEXT);
                ASSERT(ip->i_df.if_bytes > 0);
                kmem_free(iip->ili_extents_buf);
                iip->ili_extents_buf = NULL;
        }
        if (iip->ili_aextents_buf != NULL) {
                ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
                ASSERT(ip->i_d.di_anextents > 0);
                ASSERT(iip->ili_fields & XFS_ILOG_AEXT);
                ASSERT(ip->i_afp->if_bytes > 0);
                kmem_free(iip->ili_aextents_buf);
                iip->ili_aextents_buf = NULL;
        }

        lock_flags = iip->ili_lock_flags;
        iip->ili_lock_flags = 0;
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters.  Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO completes before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;

        if (xfs_iflags_test(ip, XFS_ISTALE)) {
                xfs_inode_item_unpin(lip, 0);
                return -1;
        }
        return lsn;
}

/*
 * XXX rcc - this one really has to do something.  Probably needs
 * to stamp in a new field in the incore inode.
 */
STATIC void
xfs_inode_item_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        INODE_ITEM(lip)->ili_last_lsn = lsn;
}

/*
 * This is the ops vector shared by all inode log items.
 */
static const struct xfs_item_ops xfs_inode_item_ops = {
        .iop_size       = xfs_inode_item_size,
        .iop_format     = xfs_inode_item_format,
        .iop_pin        = xfs_inode_item_pin,
        .iop_unpin      = xfs_inode_item_unpin,
        .iop_unlock     = xfs_inode_item_unlock,
        .iop_committed  = xfs_inode_item_committed,
        .iop_push       = xfs_inode_item_push,
        .iop_committing = xfs_inode_item_committing
};


/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
        struct xfs_inode        *ip,
        struct xfs_mount        *mp)
{
        struct xfs_inode_log_item *iip;

        ASSERT(ip->i_itemp == NULL);
        iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);

        iip->ili_inode = ip;
        xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
                                                &xfs_inode_item_ops);
        iip->ili_format.ilf_type = XFS_LI_INODE;
        iip->ili_format.ilf_ino = ip->i_ino;
        iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
        iip->ili_format.ilf_len = ip->i_imap.im_len;
        iip->ili_format.ilf_boffset = ip->i_imap.im_boffset;
}

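/*
 * The ilf_blkno/ilf_len/ilf_boffset values pre-filled above come from the
 * inode's imap and locate the inode within its cluster buffer on disk; log
 * recovery relies on them to find the right buffer and offset when replaying
 * the item.  The log item itself is created lazily, e.g. by callers such as
 * xfs_trans_ijoin() the first time the inode is joined to a transaction.
 */
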
/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
        xfs_inode_t     *ip)
{
#ifdef XFS_TRANS_DEBUG
        if (ip->i_itemp->ili_root_size != 0) {
                kmem_free(ip->i_itemp->ili_orig_root);
        }
#endif
        kmem_zone_free(xfs_ili_zone, ip->i_itemp);
}


/*
 * This is the inode flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the inode is
 * flushed to disk.  It is responsible for removing the inode item
 * from the AIL if it has not been re-logged, and unlocking the inode's
 * flush lock.
 *
 * To reduce AIL lock traffic as much as possible, we scan the buffer log item
 * list for other inodes that will run this function. We remove them from the
 * buffer list so we can process all the inode IO completions in one AIL lock
 * traversal.
 */
void
xfs_iflush_done(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        struct xfs_inode_log_item *iip;
        struct xfs_log_item     *blip;
        struct xfs_log_item     *next;
        struct xfs_log_item     *prev;
        struct xfs_ail          *ailp = lip->li_ailp;
        int                     need_ail = 0;

        /*
         * Scan the buffer IO completions for other inodes being completed and
         * attach them to the current inode log item.
         */
        blip = bp->b_fspriv;
        prev = NULL;
        while (blip != NULL) {
                /* only steal items that are inode flush completions */
                if (blip->li_cb != xfs_iflush_done) {
                        prev = blip;
                        blip = blip->li_bio_list;
                        continue;
                }

                /* remove from list */
                next = blip->li_bio_list;
                if (!prev) {
                        bp->b_fspriv = next;
                } else {
                        prev->li_bio_list = next;
                }

                /* add to current list */
                blip->li_bio_list = lip->li_bio_list;
                lip->li_bio_list = blip;

                /*
                 * while we have the item, do the unlocked check for needing
                 * the AIL lock.
                 */
                iip = INODE_ITEM(blip);
                if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
                        need_ail++;

                blip = next;
        }

        /* make sure we capture the state of the initial inode. */
        iip = INODE_ITEM(lip);
        if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
                need_ail++;

        /*
         * We only want to pull the item from the AIL if it is
         * actually there and its location in the log has not
         * changed since we started the flush.  Thus, we only bother
         * if the ili_logged flag is set and the inode's lsn has not
         * changed.  First we check the lsn outside
         * the lock since it's cheaper, and then we recheck while
         * holding the lock before removing the inode from the AIL.
         */
        if (need_ail) {
                struct xfs_log_item *log_items[need_ail];
                int i = 0;
                spin_lock(&ailp->xa_lock);
                for (blip = lip; blip; blip = blip->li_bio_list) {
                        iip = INODE_ITEM(blip);
                        if (iip->ili_logged &&
                            blip->li_lsn == iip->ili_flush_lsn) {
                                log_items[i++] = blip;
                        }
                        ASSERT(i <= need_ail);
                }
                /* xfs_trans_ail_delete_bulk() drops the AIL lock. */
                xfs_trans_ail_delete_bulk(ailp, log_items, i,
                                          SHUTDOWN_CORRUPT_INCORE);
        }


        /*
         * clean up and unlock the flush lock now we are done. We can clear the
         * ili_last_fields bits now that we know that the data corresponding to
         * them is safely on disk.
         */
        for (blip = lip; blip; blip = next) {
                next = blip->li_bio_list;
                blip->li_bio_list = NULL;

                iip = INODE_ITEM(blip);
                iip->ili_logged = 0;
                iip->ili_last_fields = 0;
                xfs_ifunlock(iip->ili_inode);
        }
}

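/*
 * Note that xfs_iflush_done() runs as the li_cb of whichever inode item the
 * buffer completion code pops off the buffer's callback list first (see
 * xfs_buf_do_callbacks() in xfs_buf_item.c).  The items it steals from the
 * list above never have their callbacks invoked individually; they are all
 * handled in the single pass here, which is what makes the one-traversal
 * AIL removal possible.
 */
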
/*
 * This is the inode flushing abort routine.  It is called from xfs_iflush when
 * the filesystem is shutting down to clean up the inode state.  It is
 * responsible for removing the inode item from the AIL if it has not been
 * re-logged, and unlocking the inode's flush lock.
 */
void
xfs_iflush_abort(
        xfs_inode_t             *ip,
        bool                    stale)
{
        xfs_inode_log_item_t    *iip = ip->i_itemp;

        if (iip) {
                struct xfs_ail  *ailp = iip->ili_item.li_ailp;
                if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
                        spin_lock(&ailp->xa_lock);
                        if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
                                /* xfs_trans_ail_delete() drops the AIL lock. */
                                xfs_trans_ail_delete(ailp, &iip->ili_item,
                                                stale ?
                                                     SHUTDOWN_LOG_IO_ERROR :
                                                     SHUTDOWN_CORRUPT_INCORE);
                        } else
                                spin_unlock(&ailp->xa_lock);
                }
                iip->ili_logged = 0;
                /*
                 * Clear the ili_last_fields bits now that we know that the
                 * data corresponding to them is safely on disk.
                 */
                iip->ili_last_fields = 0;
                /*
                 * Clear the inode logging fields so no more flushes are
                 * attempted.
                 */
                iip->ili_fields = 0;
        }
        /*
         * Release the inode's flush lock since we're done with it.
         */
        xfs_ifunlock(ip);
}

void
xfs_istale_done(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true);
}

/*
 * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
 * (which can have different field alignments) to the native version
 */
int
xfs_inode_item_format_convert(
        xfs_log_iovec_t         *buf,
        xfs_inode_log_format_t  *in_f)
{
        if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
                xfs_inode_log_format_32_t *in_f32 = buf->i_addr;

                in_f->ilf_type = in_f32->ilf_type;
                in_f->ilf_size = in_f32->ilf_size;
                in_f->ilf_fields = in_f32->ilf_fields;
                in_f->ilf_asize = in_f32->ilf_asize;
                in_f->ilf_dsize = in_f32->ilf_dsize;
                in_f->ilf_ino = in_f32->ilf_ino;
                /* copy biggest field of ilf_u */
                memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
                       in_f32->ilf_u.ilfu_uuid.__u_bits,
                       sizeof(uuid_t));
                in_f->ilf_blkno = in_f32->ilf_blkno;
                in_f->ilf_len = in_f32->ilf_len;
                in_f->ilf_boffset = in_f32->ilf_boffset;
                return 0;
        } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)) {
                xfs_inode_log_format_64_t *in_f64 = buf->i_addr;

                in_f->ilf_type = in_f64->ilf_type;
                in_f->ilf_size = in_f64->ilf_size;
                in_f->ilf_fields = in_f64->ilf_fields;
                in_f->ilf_asize = in_f64->ilf_asize;
                in_f->ilf_dsize = in_f64->ilf_dsize;
                in_f->ilf_ino = in_f64->ilf_ino;
                /* copy biggest field of ilf_u */
                memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
                       in_f64->ilf_u.ilfu_uuid.__u_bits,
                       sizeof(uuid_t));
                in_f->ilf_blkno = in_f64->ilf_blkno;
                in_f->ilf_len = in_f64->ilf_len;
                in_f->ilf_boffset = in_f64->ilf_boffset;
                return 0;
        }
        return EFSCORRUPTED;
}

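/*
 * xfs_inode_item_format_convert() exists because the logged struct layout
 * can differ between 32 bit and 64 bit kernels due to alignment padding, so
 * a log written on one cannot be parsed blindly on the other.  Log recovery
 * dispatches on the logged region length and copies field by field into the
 * native layout before using it; a region matching neither size is treated
 * as corruption (EFSCORRUPTED).
 */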