linux/fs/xfs/xfs_attr_leaf.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_mount.h"
  27#include "xfs_da_btree.h"
  28#include "xfs_bmap_btree.h"
  29#include "xfs_alloc_btree.h"
  30#include "xfs_ialloc_btree.h"
  31#include "xfs_alloc.h"
  32#include "xfs_btree.h"
  33#include "xfs_attr_sf.h"
  34#include "xfs_dinode.h"
  35#include "xfs_inode.h"
  36#include "xfs_inode_item.h"
  37#include "xfs_bmap.h"
  38#include "xfs_attr.h"
  39#include "xfs_attr_leaf.h"
  40#include "xfs_error.h"
  41#include "xfs_trace.h"
  42
  43/*
  44 * xfs_attr_leaf.c
  45 *
  46 * Routines to implement leaf blocks of attributes as Btrees of hashed names.
  47 */
  48
  49/*========================================================================
  50 * Function prototypes for the kernel.
  51 *========================================================================*/
  52
  53/*
  54 * Routines used for growing the Btree.
  55 */
  56STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
  57                                struct xfs_buf **bpp);
  58STATIC int xfs_attr_leaf_add_work(struct xfs_buf *leaf_buffer,
  59                                  xfs_da_args_t *args, int freemap_index);
  60STATIC void xfs_attr_leaf_compact(xfs_trans_t *tp, struct xfs_buf *leaf_buffer);
  61STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
  62                                                   xfs_da_state_blk_t *blk1,
  63                                                   xfs_da_state_blk_t *blk2);
  64STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
  65                                           xfs_da_state_blk_t *leaf_blk_1,
  66                                           xfs_da_state_blk_t *leaf_blk_2,
  67                                           int *number_entries_in_blk1,
  68                                           int *number_usedbytes_in_blk1);
  69
  70/*
  71 * Routines used for shrinking the Btree.
  72 */
  73STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
  74                                  struct xfs_buf *bp, int level);
  75STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
  76                                  struct xfs_buf *bp);
  77STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
  78                                   xfs_dablk_t blkno, int blkcnt);
  79
  80/*
  81 * Utility routines.
  82 */
  83STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
  84                                         int src_start,
  85                                         xfs_attr_leafblock_t *dst_leaf,
  86                                         int dst_start, int move_count,
  87                                         xfs_mount_t *mp);
  88STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
  89
  90/*========================================================================
  91 * Namespace helper routines
  92 *========================================================================*/
  93
  94/*
  95 * If namespace bits don't match return 0.
  96 * If all match then return 1.
  97 */
  98STATIC int
  99xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 100{
 101        return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
 102}
 103
 104
 105/*========================================================================
 106 * External routines when attribute fork size < XFS_LITINO(mp).
 107 *========================================================================*/
 108
 109/*
 110 * Query whether the requested number of additional bytes of extended
 111 * attribute space will be able to fit inline.
 112 *
 113 * Returns zero if not, else the di_forkoff fork offset to be used in the
 114 * literal area for attribute data once the new bytes have been added.
 115 *
 116 * di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
 117 * special case for dev/uuid inodes, they have fixed size data forks.
 118 */
 119int
 120xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 121{
 122        int offset;
 123        int minforkoff; /* lower limit on valid forkoff locations */
 124        int maxforkoff; /* upper limit on valid forkoff locations */
 125        int dsize;
 126        xfs_mount_t *mp = dp->i_mount;
 127
 128        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
 129
 130        switch (dp->i_d.di_format) {
 131        case XFS_DINODE_FMT_DEV:
 132                minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
 133                return (offset >= minforkoff) ? minforkoff : 0;
 134        case XFS_DINODE_FMT_UUID:
 135                minforkoff = roundup(sizeof(uuid_t), 8) >> 3;
 136                return (offset >= minforkoff) ? minforkoff : 0;
 137        }
 138
 139        /*
 140         * If the requested number of bytes is smaller than or equal to the
 141         * current attribute fork size we can always proceed.
 142         *
 143         * Note that if_bytes in the data fork might actually be larger than
 144         * the current data fork size, due to delalloc extents. In that
 145         * case either the extent count will go down when they are converted
 146         * to real extents, or the delalloc conversion will take care of the
 147         * literal area rebalancing.
 148         */
 149        if (bytes <= XFS_IFORK_ASIZE(dp))
 150                return dp->i_d.di_forkoff;
 151
 152        /*
 153         * For attr2 we can try to move the forkoff if there is space in the
 154         * literal area, but for the old format we are done if there is no
 155         * space in the fixed attribute fork.
 156         */
 157        if (!(mp->m_flags & XFS_MOUNT_ATTR2))
 158                return 0;
 159
 160        dsize = dp->i_df.if_bytes;
 161
 162        switch (dp->i_d.di_format) {
 163        case XFS_DINODE_FMT_EXTENTS:
 164                /*
 165                 * If there is no attr fork and the data fork is extents, 
 166                 * determine if creating the default attr fork will result
 167                 * in the extents form migrating to btree. If so, the
 168                 * minimum offset only needs to be the space required for
 169                 * the btree root.
 170                 */
 171                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
 172                    xfs_default_attroffset(dp))
 173                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
 174                break;
 175        case XFS_DINODE_FMT_BTREE:
 176                /*
 177                 * If the data fork is already in btree format, keep the
 178                 * existing forkoff if there is one.  Otherwise we are adding
 179                 * a new attr, so set minforkoff to just past the btree root
 180                 * so that there is plenty of room for attrs.
 181                 */
 182                if (dp->i_d.di_forkoff) {
 183                        if (offset < dp->i_d.di_forkoff)
 184                                return 0;
 185                        return dp->i_d.di_forkoff;
 186                }
 187                dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
 188                break;
 189        }
 190
 191        /*
 192         * A data fork btree root must have space for at least
 193         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
 194         */
 195        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
 196        minforkoff = roundup(minforkoff, 8) >> 3;
 197
 198        /* attr fork btree root can have at least this many key/ptr pairs */
 199        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
 200        maxforkoff = maxforkoff >> 3;   /* rounded down */
 201
 202        if (offset >= maxforkoff)
 203                return maxforkoff;
 204        if (offset >= minforkoff)
 205                return offset;
 206        return 0;
 207}
 208
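/*
 * A minimal worked sketch of the forkoff encoding used above; the
 * example_* name is illustrative and not a real XFS symbol.  di_forkoff
 * is stored in 8-byte units measured from the start of the inode literal
 * area, so a request for "bytes" of attr space can use at most
 * (XFS_LITINO(mp) - bytes) >> 3 as the fork offset, and the result is
 * clamped between a minimum dictated by the data fork and a maximum that
 * still leaves room for an attr btree root.
 */
static inline int
example_clamp_forkoff(int offset, int minforkoff, int maxforkoff)
{
        if (offset >= maxforkoff)
                return maxforkoff;      /* push the attr fork as far out as allowed */
        if (offset >= minforkoff)
                return offset;          /* requested size fits between the bounds */
        return 0;                       /* does not fit inline at all */
}
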
 209/*
 210 * Switch on the ATTR2 superblock bit (implies also FEATURES2)
 211 */
 212STATIC void
 213xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
 214{
 215        if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
 216            !(xfs_sb_version_hasattr2(&mp->m_sb))) {
 217                spin_lock(&mp->m_sb_lock);
 218                if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
 219                        xfs_sb_version_addattr2(&mp->m_sb);
 220                        spin_unlock(&mp->m_sb_lock);
 221                        xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
 222                } else
 223                        spin_unlock(&mp->m_sb_lock);
 224        }
 225}
 226
 227/*
 228 * Create the initial contents of a shortform attribute list.
 229 */
 230void
 231xfs_attr_shortform_create(xfs_da_args_t *args)
 232{
 233        xfs_attr_sf_hdr_t *hdr;
 234        xfs_inode_t *dp;
 235        xfs_ifork_t *ifp;
 236
 237        trace_xfs_attr_sf_create(args);
 238
 239        dp = args->dp;
 240        ASSERT(dp != NULL);
 241        ifp = dp->i_afp;
 242        ASSERT(ifp != NULL);
 243        ASSERT(ifp->if_bytes == 0);
 244        if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
 245                ifp->if_flags &= ~XFS_IFEXTENTS;        /* just in case */
 246                dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
 247                ifp->if_flags |= XFS_IFINLINE;
 248        } else {
 249                ASSERT(ifp->if_flags & XFS_IFINLINE);
 250        }
 251        xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
 252        hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
 253        hdr->count = 0;
 254        hdr->totsize = cpu_to_be16(sizeof(*hdr));
 255        xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
 256}
 257
 258/*
 259 * Add a name/value pair to the shortform attribute list.
 260 * Overflow from the inode has already been checked for.
 261 */
 262void
 263xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
 264{
 265        xfs_attr_shortform_t *sf;
 266        xfs_attr_sf_entry_t *sfe;
 267        int i, offset, size;
 268        xfs_mount_t *mp;
 269        xfs_inode_t *dp;
 270        xfs_ifork_t *ifp;
 271
 272        trace_xfs_attr_sf_add(args);
 273
 274        dp = args->dp;
 275        mp = dp->i_mount;
 276        dp->i_d.di_forkoff = forkoff;
 277
 278        ifp = dp->i_afp;
 279        ASSERT(ifp->if_flags & XFS_IFINLINE);
 280        sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
 281        sfe = &sf->list[0];
 282        for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
 283#ifdef DEBUG
 284                if (sfe->namelen != args->namelen)
 285                        continue;
 286                if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
 287                        continue;
 288                if (!xfs_attr_namesp_match(args->flags, sfe->flags))
 289                        continue;
 290                ASSERT(0);
 291#endif
 292        }
 293
 294        offset = (char *)sfe - (char *)sf;
 295        size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
 296        xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
 297        sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
 298        sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);
 299
 300        sfe->namelen = args->namelen;
 301        sfe->valuelen = args->valuelen;
 302        sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
 303        memcpy(sfe->nameval, args->name, args->namelen);
 304        memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
 305        sf->hdr.count++;
 306        be16_add_cpu(&sf->hdr.totsize, size);
 307        xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
 308
 309        xfs_sbversion_add_attr2(mp, args->trans);
 310}
 311
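/*
 * A worked sketch of the shortform space accounting used by the add path
 * above, assuming the per-entry cost visible in xfs_attr_shortform_allfit()
 * below: the fixed namelen/valuelen/flags bytes plus the name and value
 * bytes packed directly behind them.  The example_* names are illustrative
 * and not real XFS symbols.
 */
static inline int
example_sf_entsize(int namelen, int valuelen)
{
        return 3 + namelen + valuelen;  /* 3 fixed bytes, then name, then value */
}

static inline int
example_sf_totsize_after_add(int totsize, int namelen, int valuelen)
{
        /* mirrors the be16_add_cpu(&sf->hdr.totsize, size) above */
        return totsize + example_sf_entsize(namelen, valuelen);
}
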
 312/*
 313 * After the last attribute is removed revert to original inode format,
 314 * making all literal area available to the data fork once more.
 315 */
 316STATIC void
 317xfs_attr_fork_reset(
 318        struct xfs_inode        *ip,
 319        struct xfs_trans        *tp)
 320{
 321        xfs_idestroy_fork(ip, XFS_ATTR_FORK);
 322        ip->i_d.di_forkoff = 0;
 323        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 324
 325        ASSERT(ip->i_d.di_anextents == 0);
 326        ASSERT(ip->i_afp == NULL);
 327
 328        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 329}
 330
 331/*
 332 * Remove an attribute from the shortform attribute list structure.
 333 */
 334int
 335xfs_attr_shortform_remove(xfs_da_args_t *args)
 336{
 337        xfs_attr_shortform_t *sf;
 338        xfs_attr_sf_entry_t *sfe;
 339        int base, size=0, end, totsize, i;
 340        xfs_mount_t *mp;
 341        xfs_inode_t *dp;
 342
 343        trace_xfs_attr_sf_remove(args);
 344
 345        dp = args->dp;
 346        mp = dp->i_mount;
 347        base = sizeof(xfs_attr_sf_hdr_t);
 348        sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
 349        sfe = &sf->list[0];
 350        end = sf->hdr.count;
 351        for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
 352                                        base += size, i++) {
 353                size = XFS_ATTR_SF_ENTSIZE(sfe);
 354                if (sfe->namelen != args->namelen)
 355                        continue;
 356                if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
 357                        continue;
 358                if (!xfs_attr_namesp_match(args->flags, sfe->flags))
 359                        continue;
 360                break;
 361        }
 362        if (i == end)
 363                return(XFS_ERROR(ENOATTR));
 364
 365        /*
 366         * Fix up the attribute fork data, covering the hole
 367         */
 368        end = base + size;
 369        totsize = be16_to_cpu(sf->hdr.totsize);
 370        if (end != totsize)
 371                memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
 372        sf->hdr.count--;
 373        be16_add_cpu(&sf->hdr.totsize, -size);
 374
 375        /*
 376         * Fix up the start offset of the attribute fork
 377         */
 378        totsize -= size;
 379        if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
 380            (mp->m_flags & XFS_MOUNT_ATTR2) &&
 381            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 382            !(args->op_flags & XFS_DA_OP_ADDNAME)) {
 383                xfs_attr_fork_reset(dp, args->trans);
 384        } else {
 385                xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
 386                dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
 387                ASSERT(dp->i_d.di_forkoff);
 388                ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
 389                                (args->op_flags & XFS_DA_OP_ADDNAME) ||
 390                                !(mp->m_flags & XFS_MOUNT_ATTR2) ||
 391                                dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
 392                xfs_trans_log_inode(args->trans, dp,
 393                                        XFS_ILOG_CORE | XFS_ILOG_ADATA);
 394        }
 395
 396        xfs_sbversion_add_attr2(mp, args->trans);
 397
 398        return(0);
 399}
 400
 401/*
 402 * Look up a name in a shortform attribute list structure.
 403 */
 404/*ARGSUSED*/
 405int
 406xfs_attr_shortform_lookup(xfs_da_args_t *args)
 407{
 408        xfs_attr_shortform_t *sf;
 409        xfs_attr_sf_entry_t *sfe;
 410        int i;
 411        xfs_ifork_t *ifp;
 412
 413        trace_xfs_attr_sf_lookup(args);
 414
 415        ifp = args->dp->i_afp;
 416        ASSERT(ifp->if_flags & XFS_IFINLINE);
 417        sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
 418        sfe = &sf->list[0];
 419        for (i = 0; i < sf->hdr.count;
 420                                sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
 421                if (sfe->namelen != args->namelen)
 422                        continue;
 423                if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
 424                        continue;
 425                if (!xfs_attr_namesp_match(args->flags, sfe->flags))
 426                        continue;
 427                return(XFS_ERROR(EEXIST));
 428        }
 429        return(XFS_ERROR(ENOATTR));
 430}
 431
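/*
 * A minimal sketch of the walk performed by the lookup above, using a
 * local illustrative entry type rather than the on-disk definition from
 * xfs_attr_sf.h: entries are variable length, so the "next" entry starts
 * right after the current entry's fixed bytes plus its packed name and
 * value bytes.
 */
struct example_sf_entry {
        unsigned char   namelen;
        unsigned char   valuelen;
        unsigned char   flags;
        unsigned char   nameval[1];     /* name bytes, then value bytes */
};

static inline struct example_sf_entry *
example_sf_nextentry(struct example_sf_entry *sfe)
{
        return (struct example_sf_entry *)((char *)sfe +
                        3 + sfe->namelen + sfe->valuelen);
}
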
 432/*
 433 * Retrieve the value of a name in a shortform attribute list structure.
 434 */
 435/*ARGSUSED*/
 436int
 437xfs_attr_shortform_getvalue(xfs_da_args_t *args)
 438{
 439        xfs_attr_shortform_t *sf;
 440        xfs_attr_sf_entry_t *sfe;
 441        int i;
 442
 443        ASSERT(args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
 444        sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
 445        sfe = &sf->list[0];
 446        for (i = 0; i < sf->hdr.count;
 447                                sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
 448                if (sfe->namelen != args->namelen)
 449                        continue;
 450                if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
 451                        continue;
 452                if (!xfs_attr_namesp_match(args->flags, sfe->flags))
 453                        continue;
 454                if (args->flags & ATTR_KERNOVAL) {
 455                        args->valuelen = sfe->valuelen;
 456                        return(XFS_ERROR(EEXIST));
 457                }
 458                if (args->valuelen < sfe->valuelen) {
 459                        args->valuelen = sfe->valuelen;
 460                        return(XFS_ERROR(ERANGE));
 461                }
 462                args->valuelen = sfe->valuelen;
 463                memcpy(args->value, &sfe->nameval[args->namelen],
 464                                                    args->valuelen);
 465                return(XFS_ERROR(EEXIST));
 466        }
 467        return(XFS_ERROR(ENOATTR));
 468}
 469
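/*
 * A caller-side sketch of the value-return convention above, with
 * illustrative names (example_*, not real XFS symbols): a length-only
 * probe (the ATTR_KERNOVAL case above) just reports valuelen, while a
 * copy into a buffer that is too small reports the real length and fails
 * in the ERANGE style so the caller can resize and retry.
 */
static inline int
example_copy_value(char *dst, int dstlen, const char *src, int srclen,
                   int *valuelen_out)
{
        int i;

        *valuelen_out = srclen;         /* always report the real length */
        if (!dst)
                return 0;               /* length-only probe, nothing copied */
        if (dstlen < srclen)
                return -1;              /* caller's buffer too small (ERANGE case) */
        for (i = 0; i < srclen; i++)    /* stand-in for the memcpy above */
                dst[i] = src[i];
        return 0;
}
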
 470/*
 471 * Convert from using the shortform to the leaf.
 472 */
 473int
 474xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
 475{
 476        xfs_inode_t *dp;
 477        xfs_attr_shortform_t *sf;
 478        xfs_attr_sf_entry_t *sfe;
 479        xfs_da_args_t nargs;
 480        char *tmpbuffer;
 481        int error, i, size;
 482        xfs_dablk_t blkno;
 483        struct xfs_buf *bp;
 484        xfs_ifork_t *ifp;
 485
 486        trace_xfs_attr_sf_to_leaf(args);
 487
 488        dp = args->dp;
 489        ifp = dp->i_afp;
 490        sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
 491        size = be16_to_cpu(sf->hdr.totsize);
 492        tmpbuffer = kmem_alloc(size, KM_SLEEP);
 493        ASSERT(tmpbuffer != NULL);
 494        memcpy(tmpbuffer, ifp->if_u1.if_data, size);
 495        sf = (xfs_attr_shortform_t *)tmpbuffer;
 496
 497        xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
 498        bp = NULL;
 499        error = xfs_da_grow_inode(args, &blkno);
 500        if (error) {
 501                /*
 502                 * If we hit an IO error in the middle of the transaction inside
 503                 * grow_inode(), we may have inconsistent data. Bail out.
 504                 */
 505                if (error == EIO)
 506                        goto out;
 507                xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
 508                memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
 509                goto out;
 510        }
 511
 512        ASSERT(blkno == 0);
 513        error = xfs_attr_leaf_create(args, blkno, &bp);
 514        if (error) {
 515                error = xfs_da_shrink_inode(args, 0, bp);
 516                bp = NULL;
 517                if (error)
 518                        goto out;
 519                xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
 520                memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
 521                goto out;
 522        }
 523
 524        memset((char *)&nargs, 0, sizeof(nargs));
 525        nargs.dp = dp;
 526        nargs.firstblock = args->firstblock;
 527        nargs.flist = args->flist;
 528        nargs.total = args->total;
 529        nargs.whichfork = XFS_ATTR_FORK;
 530        nargs.trans = args->trans;
 531        nargs.op_flags = XFS_DA_OP_OKNOENT;
 532
 533        sfe = &sf->list[0];
 534        for (i = 0; i < sf->hdr.count; i++) {
 535                nargs.name = sfe->nameval;
 536                nargs.namelen = sfe->namelen;
 537                nargs.value = &sfe->nameval[nargs.namelen];
 538                nargs.valuelen = sfe->valuelen;
 539                nargs.hashval = xfs_da_hashname(sfe->nameval,
 540                                                sfe->namelen);
 541                nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
 542                error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set nargs.index */
 543                ASSERT(error == ENOATTR);
 544                error = xfs_attr_leaf_add(bp, &nargs);
 545                ASSERT(error != ENOSPC);
 546                if (error)
 547                        goto out;
 548                sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 549        }
 550        error = 0;
 551
 552out:
 553        kmem_free(tmpbuffer);
 554        return(error);
 555}
 556
 557STATIC int
 558xfs_attr_shortform_compare(const void *a, const void *b)
 559{
 560        xfs_attr_sf_sort_t *sa, *sb;
 561
 562        sa = (xfs_attr_sf_sort_t *)a;
 563        sb = (xfs_attr_sf_sort_t *)b;
 564        if (sa->hash < sb->hash) {
 565                return(-1);
 566        } else if (sa->hash > sb->hash) {
 567                return(1);
 568        } else {
 569                return(sa->entno - sb->entno);
 570        }
 571}
 572
 573
 574#define XFS_ISRESET_CURSOR(cursor) \
 575        (!((cursor)->initted) && !((cursor)->hashval) && \
 576         !((cursor)->blkno) && !((cursor)->offset))
 577/*
 578 * Copy out entries of shortform attribute lists for attr_list().
 579 * Shortform attribute lists are not stored in hashval sorted order.
 580 * If the output buffer is not large enough to hold them all, then
 581 * we have to calculate each entry's hashvalue and sort them before
 582 * we can begin returning them to the user.
 583 */
 584/*ARGSUSED*/
 585int
 586xfs_attr_shortform_list(xfs_attr_list_context_t *context)
 587{
 588        attrlist_cursor_kern_t *cursor;
 589        xfs_attr_sf_sort_t *sbuf, *sbp;
 590        xfs_attr_shortform_t *sf;
 591        xfs_attr_sf_entry_t *sfe;
 592        xfs_inode_t *dp;
 593        int sbsize, nsbuf, count, i;
 594        int error;
 595
 596        ASSERT(context != NULL);
 597        dp = context->dp;
 598        ASSERT(dp != NULL);
 599        ASSERT(dp->i_afp != NULL);
 600        sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
 601        ASSERT(sf != NULL);
 602        if (!sf->hdr.count)
 603                return(0);
 604        cursor = context->cursor;
 605        ASSERT(cursor != NULL);
 606
 607        trace_xfs_attr_list_sf(context);
 608
 609        /*
 610         * If the buffer is large enough and the cursor is at the start,
 611         * do not bother with sorting since we will return everything in
 612         * one buffer and another call using the cursor won't need to be
 613         * made.
 614         * Note the generous fudge factor of 16 overhead bytes per entry.
 615         * If bufsize is zero then put_listent must be a search function
 616         * and can just scan through what we have.
 617         */
 618        if (context->bufsize == 0 ||
 619            (XFS_ISRESET_CURSOR(cursor) &&
 620             (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
 621                for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
 622                        error = context->put_listent(context,
 623                                           sfe->flags,
 624                                           sfe->nameval,
 625                                           (int)sfe->namelen,
 626                                           (int)sfe->valuelen,
 627                                           &sfe->nameval[sfe->namelen]);
 628
 629                        /*
 630                         * Either search callback finished early or
 631                         * didn't fit it all in the buffer after all.
 632                         */
 633                        if (context->seen_enough)
 634                                break;
 635
 636                        if (error)
 637                                return error;
 638                        sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 639                }
 640                trace_xfs_attr_list_sf_all(context);
 641                return(0);
 642        }
 643
 644        /* do no more for a search callback */
 645        if (context->bufsize == 0)
 646                return 0;
 647
 648        /*
 649         * It didn't all fit, so we have to sort everything on hashval.
 650         */
 651        sbsize = sf->hdr.count * sizeof(*sbuf);
 652        sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
 653
 654        /*
 655         * Scan the attribute list, storing the relevant info from each
 656         * entry into the sort buffer.
 657         */
 658        nsbuf = 0;
 659        for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
 660                if (unlikely(
 661                    ((char *)sfe < (char *)sf) ||
 662                    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
 663                        XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
 664                                             XFS_ERRLEVEL_LOW,
 665                                             context->dp->i_mount, sfe);
 666                        kmem_free(sbuf);
 667                        return XFS_ERROR(EFSCORRUPTED);
 668                }
 669
 670                sbp->entno = i;
 671                sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
 672                sbp->name = sfe->nameval;
 673                sbp->namelen = sfe->namelen;
 674                /* These are bytes, and both on-disk, don't endian-flip */
 675                sbp->valuelen = sfe->valuelen;
 676                sbp->flags = sfe->flags;
 677                sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 678                sbp++;
 679                nsbuf++;
 680        }
 681
 682        /*
 683         * Sort the entries on hash then entno.
 684         */
 685        xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
 686
 687        /*
 688         * Re-find our place IN THE SORTED LIST.
 689         */
 690        count = 0;
 691        cursor->initted = 1;
 692        cursor->blkno = 0;
 693        for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
 694                if (sbp->hash == cursor->hashval) {
 695                        if (cursor->offset == count) {
 696                                break;
 697                        }
 698                        count++;
 699                } else if (sbp->hash > cursor->hashval) {
 700                        break;
 701                }
 702        }
 703        if (i == nsbuf) {
 704                kmem_free(sbuf);
 705                return(0);
 706        }
 707
 708        /*
 709         * Loop putting entries into the user buffer.
 710         */
 711        for ( ; i < nsbuf; i++, sbp++) {
 712                if (cursor->hashval != sbp->hash) {
 713                        cursor->hashval = sbp->hash;
 714                        cursor->offset = 0;
 715                }
 716                error = context->put_listent(context,
 717                                        sbp->flags,
 718                                        sbp->name,
 719                                        sbp->namelen,
 720                                        sbp->valuelen,
 721                                        &sbp->name[sbp->namelen]);
 722                if (error)
 723                        return error;
 724                if (context->seen_enough)
 725                        break;
 726                cursor->offset++;
 727        }
 728
 729        kmem_free(sbuf);
 730        return(0);
 731}
 732
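/*
 * A minimal sketch of the cursor convention used above, with illustrative
 * names (example_*): because several attributes can share a hashval, the
 * resume position is the pair (hashval, offset), where offset counts how
 * many same-hash entries have already been returned.  Re-finding the place
 * in the sorted array is then a linear scan.
 */
static inline int
example_find_resume_index(const unsigned int *hashes, int n,
                          unsigned int cur_hash, int cur_offset)
{
        int i, count = 0;

        for (i = 0; i < n; i++) {
                if (hashes[i] == cur_hash) {
                        if (count == cur_offset)
                                break;          /* resume right here */
                        count++;
                } else if (hashes[i] > cur_hash) {
                        break;                  /* first entry past the cursor */
                }
        }
        return i;                               /* == n means nothing left to return */
}
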
 733/*
 734 * Check a leaf attribute block to see if all the entries would fit into
 735 * a shortform attribute list.
 736 */
 737int
 738xfs_attr_shortform_allfit(
 739        struct xfs_buf  *bp,
 740        struct xfs_inode *dp)
 741{
 742        xfs_attr_leafblock_t *leaf;
 743        xfs_attr_leaf_entry_t *entry;
 744        xfs_attr_leaf_name_local_t *name_loc;
 745        int bytes, i;
 746
 747        leaf = bp->b_addr;
 748        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 749
 750        entry = &leaf->entries[0];
 751        bytes = sizeof(struct xfs_attr_sf_hdr);
 752        for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
 753                if (entry->flags & XFS_ATTR_INCOMPLETE)
 754                        continue;               /* don't copy partial entries */
 755                if (!(entry->flags & XFS_ATTR_LOCAL))
 756                        return(0);
 757                name_loc = xfs_attr_leaf_name_local(leaf, i);
 758                if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
 759                        return(0);
 760                if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
 761                        return(0);
 762                bytes += sizeof(struct xfs_attr_sf_entry)-1
 763                                + name_loc->namelen
 764                                + be16_to_cpu(name_loc->valuelen);
 765        }
 766        if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
 767            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 768            (bytes == sizeof(struct xfs_attr_sf_hdr)))
 769                return(-1);
 770        return(xfs_attr_shortform_bytesfit(dp, bytes));
 771}
 772
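/*
 * A worked sketch of the accounting above, with illustrative names and an
 * illustrative header size: sum the shortform cost of every complete local
 * entry, then ask xfs_attr_shortform_bytesfit() whether that many bytes
 * still fit inline.  Remote or incomplete entries disqualify the block.
 */
static inline int
example_shortform_bytes(const int *namelens, const int *valuelens, int n)
{
        int i, bytes = 4;       /* illustrative size of the shortform header */

        for (i = 0; i < n; i++)
                bytes += 3 + namelens[i] + valuelens[i];
        return bytes;
}
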
 773/*
 774 * Convert a leaf attribute list to shortform attribute list
 775 */
 776int
 777xfs_attr_leaf_to_shortform(
 778        struct xfs_buf  *bp,
 779        xfs_da_args_t   *args,
 780        int             forkoff)
 781{
 782        xfs_attr_leafblock_t *leaf;
 783        xfs_attr_leaf_entry_t *entry;
 784        xfs_attr_leaf_name_local_t *name_loc;
 785        xfs_da_args_t nargs;
 786        xfs_inode_t *dp;
 787        char *tmpbuffer;
 788        int error, i;
 789
 790        trace_xfs_attr_leaf_to_sf(args);
 791
 792        dp = args->dp;
 793        tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
 794        ASSERT(tmpbuffer != NULL);
 795
 796        ASSERT(bp != NULL);
 797        memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(dp->i_mount));
 798        leaf = (xfs_attr_leafblock_t *)tmpbuffer;
 799        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 800        memset(bp->b_addr, 0, XFS_LBSIZE(dp->i_mount));
 801
 802        /*
 803         * Clean out the prior contents of the attribute list.
 804         */
 805        error = xfs_da_shrink_inode(args, 0, bp);
 806        if (error)
 807                goto out;
 808
 809        if (forkoff == -1) {
 810                ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
 811                ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
 812                xfs_attr_fork_reset(dp, args->trans);
 813                goto out;
 814        }
 815
 816        xfs_attr_shortform_create(args);
 817
 818        /*
 819         * Copy the attributes
 820         */
 821        memset((char *)&nargs, 0, sizeof(nargs));
 822        nargs.dp = dp;
 823        nargs.firstblock = args->firstblock;
 824        nargs.flist = args->flist;
 825        nargs.total = args->total;
 826        nargs.whichfork = XFS_ATTR_FORK;
 827        nargs.trans = args->trans;
 828        nargs.op_flags = XFS_DA_OP_OKNOENT;
 829        entry = &leaf->entries[0];
 830        for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
 831                if (entry->flags & XFS_ATTR_INCOMPLETE)
 832                        continue;       /* don't copy partial entries */
 833                if (!entry->nameidx)
 834                        continue;
 835                ASSERT(entry->flags & XFS_ATTR_LOCAL);
 836                name_loc = xfs_attr_leaf_name_local(leaf, i);
 837                nargs.name = name_loc->nameval;
 838                nargs.namelen = name_loc->namelen;
 839                nargs.value = &name_loc->nameval[nargs.namelen];
 840                nargs.valuelen = be16_to_cpu(name_loc->valuelen);
 841                nargs.hashval = be32_to_cpu(entry->hashval);
 842                nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
 843                xfs_attr_shortform_add(&nargs, forkoff);
 844        }
 845        error = 0;
 846
 847out:
 848        kmem_free(tmpbuffer);
 849        return(error);
 850}
 851
 852/*
 853 * Convert from using a single leaf to a root node and a leaf.
 854 */
 855int
 856xfs_attr_leaf_to_node(xfs_da_args_t *args)
 857{
 858        xfs_attr_leafblock_t *leaf;
 859        xfs_da_intnode_t *node;
 860        xfs_inode_t *dp;
 861        struct xfs_buf *bp1, *bp2;
 862        xfs_dablk_t blkno;
 863        int error;
 864
 865        trace_xfs_attr_leaf_to_node(args);
 866
 867        dp = args->dp;
 868        bp1 = bp2 = NULL;
 869        error = xfs_da_grow_inode(args, &blkno);
 870        if (error)
 871                goto out;
 872        error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
 873                                             XFS_ATTR_FORK);
 874        if (error)
 875                goto out;
 876        ASSERT(bp1 != NULL);
 877        bp2 = NULL;
 878        error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2,
 879                                            XFS_ATTR_FORK);
 880        if (error)
 881                goto out;
 882        ASSERT(bp2 != NULL);
 883        memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(dp->i_mount));
 884        bp1 = NULL;
 885        xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
 886
 887        /*
 888         * Set up the new root node.
 889         */
 890        error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
 891        if (error)
 892                goto out;
 893        node = bp1->b_addr;
 894        leaf = bp2->b_addr;
 895        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 896        /* both on-disk, don't endian-flip twice */
 897        node->btree[0].hashval =
 898                leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval;
 899        node->btree[0].before = cpu_to_be32(blkno);
 900        node->hdr.count = cpu_to_be16(1);
 901        xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
 902        error = 0;
 903out:
 904        return(error);
 905}
 906
 907
 908/*========================================================================
 909 * Routines used for growing the Btree.
 910 *========================================================================*/
 911
 912/*
 913 * Create the initial contents of a leaf attribute list
 914 * or a leaf in a node attribute list.
 915 */
 916STATIC int
 917xfs_attr_leaf_create(
 918        xfs_da_args_t   *args,
 919        xfs_dablk_t     blkno,
 920        struct xfs_buf  **bpp)
 921{
 922        xfs_attr_leafblock_t *leaf;
 923        xfs_attr_leaf_hdr_t *hdr;
 924        xfs_inode_t *dp;
 925        struct xfs_buf *bp;
 926        int error;
 927
 928        trace_xfs_attr_leaf_create(args);
 929
 930        dp = args->dp;
 931        ASSERT(dp != NULL);
 932        error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
 933                                            XFS_ATTR_FORK);
 934        if (error)
 935                return(error);
 936        ASSERT(bp != NULL);
 937        leaf = bp->b_addr;
 938        memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
 939        hdr = &leaf->hdr;
 940        hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
 941        hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount));
 942        if (!hdr->firstused) {
 943                hdr->firstused = cpu_to_be16(
 944                        XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN);
 945        }
 946
 947        hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
 948        hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
 949                                           sizeof(xfs_attr_leaf_hdr_t));
 950
 951        xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
 952
 953        *bpp = bp;
 954        return(0);
 955}
 956
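/*
 * A minimal sketch of the leaf block geometry set up above, with an
 * illustrative helper name: the header sits at offset 0, the entry table
 * grows upward directly behind it, and name/value data grows downward
 * from the end of the block, with hdr.firstused marking the lowest byte
 * of that region.  The single initial freemap entry therefore covers
 * everything between the header and firstused.
 */
static inline int
example_initial_free_space(int blocksize, int hdrsize)
{
        int firstused = blocksize;      /* no name/value data stored yet */

        return firstused - hdrsize;     /* freemap[0].size in the code above */
}
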
 957/*
 958 * Split the leaf node, rebalance, then add the new entry.
 959 */
 960int
 961xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 962                                   xfs_da_state_blk_t *newblk)
 963{
 964        xfs_dablk_t blkno;
 965        int error;
 966
 967        trace_xfs_attr_leaf_split(state->args);
 968
 969        /*
 970         * Allocate space for a new leaf node.
 971         */
 972        ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
 973        error = xfs_da_grow_inode(state->args, &blkno);
 974        if (error)
 975                return(error);
 976        error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp);
 977        if (error)
 978                return(error);
 979        newblk->blkno = blkno;
 980        newblk->magic = XFS_ATTR_LEAF_MAGIC;
 981
 982        /*
 983         * Rebalance the entries across the two leaves.
 984         * NOTE: rebalance() currently depends on the 2nd block being empty.
 985         */
 986        xfs_attr_leaf_rebalance(state, oldblk, newblk);
 987        error = xfs_da_blk_link(state, oldblk, newblk);
 988        if (error)
 989                return(error);
 990
 991        /*
 992         * Save info on "old" attribute for "atomic rename" ops, leaf_add()
 993         * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
 994         * "new" attrs info.  Will need the "old" info to remove it later.
 995         *
 996         * Insert the "new" entry in the correct block.
 997         */
 998        if (state->inleaf) {
 999                trace_xfs_attr_leaf_add_old(state->args);
1000                error = xfs_attr_leaf_add(oldblk->bp, state->args);
1001        } else {
1002                trace_xfs_attr_leaf_add_new(state->args);
1003                error = xfs_attr_leaf_add(newblk->bp, state->args);
1004        }
1005
1006        /*
1007         * Update last hashval in each block since we added the name.
1008         */
1009        oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
1010        newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
1011        return(error);
1012}
1013
1014/*
1015 * Add a name to the leaf attribute list structure.
1016 */
1017int
1018xfs_attr_leaf_add(
1019        struct xfs_buf          *bp,
1020        struct xfs_da_args      *args)
1021{
1022        xfs_attr_leafblock_t *leaf;
1023        xfs_attr_leaf_hdr_t *hdr;
1024        xfs_attr_leaf_map_t *map;
1025        int tablesize, entsize, sum, tmp, i;
1026
1027        trace_xfs_attr_leaf_add(args);
1028
1029        leaf = bp->b_addr;
1030        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1031        ASSERT((args->index >= 0)
1032                && (args->index <= be16_to_cpu(leaf->hdr.count)));
1033        hdr = &leaf->hdr;
1034        entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1035                           args->trans->t_mountp->m_sb.sb_blocksize, NULL);
1036
1037        /*
1038         * Search through freemap for first-fit on new name length.
1039         * (may need to figure in size of entry struct too)
1040         */
1041        tablesize = (be16_to_cpu(hdr->count) + 1)
1042                                        * sizeof(xfs_attr_leaf_entry_t)
1043                                        + sizeof(xfs_attr_leaf_hdr_t);
1044        map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1];
1045        for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
1046                if (tablesize > be16_to_cpu(hdr->firstused)) {
1047                        sum += be16_to_cpu(map->size);
1048                        continue;
1049                }
1050                if (!map->size)
1051                        continue;       /* no space in this map */
1052                tmp = entsize;
1053                if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused))
1054                        tmp += sizeof(xfs_attr_leaf_entry_t);
1055                if (be16_to_cpu(map->size) >= tmp) {
1056                        tmp = xfs_attr_leaf_add_work(bp, args, i);
1057                        return(tmp);
1058                }
1059                sum += be16_to_cpu(map->size);
1060        }
1061
1062        /*
1063         * If there are no holes in the address space of the block,
1064         * and we don't have enough freespace, then compaction will do us
1065         * no good and we should just give up.
1066         */
1067        if (!hdr->holes && (sum < entsize))
1068                return(XFS_ERROR(ENOSPC));
1069
1070        /*
1071         * Compact the entries to coalesce free space.
1072         * This may change the hdr->count via dropping INCOMPLETE entries.
1073         */
1074        xfs_attr_leaf_compact(args->trans, bp);
1075
1076        /*
1077         * After compaction, the block is guaranteed to have only one
1078         * free region, in freemap[0].  If it is not big enough, give up.
1079         */
1080        if (be16_to_cpu(hdr->freemap[0].size)
1081                                < (entsize + sizeof(xfs_attr_leaf_entry_t)))
1082                return(XFS_ERROR(ENOSPC));
1083
1084        return(xfs_attr_leaf_add_work(bp, args, 0));
1085}
1086
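/*
 * A minimal sketch of the first-fit decision made above, with illustrative
 * names: a free region can hold the new attribute if it has room for the
 * packed name/value bytes, plus one more entry slot when the region sits
 * below firstused (the entry table grows into it).  If no region fits but
 * the summed free space would, the block is compacted and freemap[0] is
 * retried; otherwise the caller gets ENOSPC.
 */
static inline int
example_map_fits(int map_base, int map_size, int firstused,
                 int entsize, int entry_slot_size)
{
        int need = entsize;

        if (map_base < firstused)
                need += entry_slot_size;        /* also grows the entry table */
        return map_size >= need;
}
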
1087/*
1088 * Add a name to a leaf attribute list structure.
1089 */
1090STATIC int
1091xfs_attr_leaf_add_work(
1092        struct xfs_buf  *bp,
1093        xfs_da_args_t   *args,
1094        int             mapindex)
1095{
1096        xfs_attr_leafblock_t *leaf;
1097        xfs_attr_leaf_hdr_t *hdr;
1098        xfs_attr_leaf_entry_t *entry;
1099        xfs_attr_leaf_name_local_t *name_loc;
1100        xfs_attr_leaf_name_remote_t *name_rmt;
1101        xfs_attr_leaf_map_t *map;
1102        xfs_mount_t *mp;
1103        int tmp, i;
1104
1105        leaf = bp->b_addr;
1106        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1107        hdr = &leaf->hdr;
1108        ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
1109        ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));
1110
1111        /*
1112         * Force open some space in the entry array and fill it in.
1113         */
1114        entry = &leaf->entries[args->index];
1115        if (args->index < be16_to_cpu(hdr->count)) {
1116                tmp  = be16_to_cpu(hdr->count) - args->index;
1117                tmp *= sizeof(xfs_attr_leaf_entry_t);
1118                memmove((char *)(entry+1), (char *)entry, tmp);
1119                xfs_trans_log_buf(args->trans, bp,
1120                    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1121        }
1122        be16_add_cpu(&hdr->count, 1);
1123
1124        /*
1125         * Allocate space for the new string (at the end of the run).
1126         */
1127        map = &hdr->freemap[mapindex];
1128        mp = args->trans->t_mountp;
1129        ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
1130        ASSERT((be16_to_cpu(map->base) & 0x3) == 0);
1131        ASSERT(be16_to_cpu(map->size) >=
1132                xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1133                                         mp->m_sb.sb_blocksize, NULL));
1134        ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
1135        ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
1136        be16_add_cpu(&map->size,
1137                -xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1138                                          mp->m_sb.sb_blocksize, &tmp));
1139        entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
1140                                     be16_to_cpu(map->size));
1141        entry->hashval = cpu_to_be32(args->hashval);
1142        entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
1143        entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
1144        if (args->op_flags & XFS_DA_OP_RENAME) {
1145                entry->flags |= XFS_ATTR_INCOMPLETE;
1146                if ((args->blkno2 == args->blkno) &&
1147                    (args->index2 <= args->index)) {
1148                        args->index2++;
1149                }
1150        }
1151        xfs_trans_log_buf(args->trans, bp,
1152                          XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
1153        ASSERT((args->index == 0) ||
1154               (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
1155        ASSERT((args->index == be16_to_cpu(hdr->count)-1) ||
1156               (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));
1157
1158        /*
1159         * For "remote" attribute values, simply note that we need to
1160         * allocate space for the "remote" value.  We can't actually
1161         * allocate the extents in this transaction, and we can't decide
1162         * which blocks they should be as we might allocate more blocks
1163         * as part of this transaction (a split operation for example).
1164         */
1165        if (entry->flags & XFS_ATTR_LOCAL) {
1166                name_loc = xfs_attr_leaf_name_local(leaf, args->index);
1167                name_loc->namelen = args->namelen;
1168                name_loc->valuelen = cpu_to_be16(args->valuelen);
1169                memcpy((char *)name_loc->nameval, args->name, args->namelen);
1170                memcpy((char *)&name_loc->nameval[args->namelen], args->value,
1171                                   be16_to_cpu(name_loc->valuelen));
1172        } else {
1173                name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
1174                name_rmt->namelen = args->namelen;
1175                memcpy((char *)name_rmt->name, args->name, args->namelen);
1176                entry->flags |= XFS_ATTR_INCOMPLETE;
1177                /* just in case */
1178                name_rmt->valuelen = 0;
1179                name_rmt->valueblk = 0;
1180                args->rmtblkno = 1;
1181                args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
1182        }
1183        xfs_trans_log_buf(args->trans, bp,
1184             XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
1185                                   xfs_attr_leaf_entsize(leaf, args->index)));
1186
1187        /*
1188         * Update the control info for this leaf node
1189         */
1190        if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) {
1191                /* both on-disk, don't endian-flip twice */
1192                hdr->firstused = entry->nameidx;
1193        }
1194        ASSERT(be16_to_cpu(hdr->firstused) >=
1195               ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
1196        tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t)
1197                                        + sizeof(xfs_attr_leaf_hdr_t);
1198        map = &hdr->freemap[0];
1199        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
1200                if (be16_to_cpu(map->base) == tmp) {
1201                        be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t));
1202                        be16_add_cpu(&map->size,
1203                                 -((int)sizeof(xfs_attr_leaf_entry_t)));
1204                }
1205        }
1206        be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
1207        xfs_trans_log_buf(args->trans, bp,
1208                XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
1209        return(0);
1210}
1211
1212/*
1213 * Garbage collect a leaf attribute list block by copying it to a new buffer.
1214 */
1215STATIC void
1216xfs_attr_leaf_compact(
1217        struct xfs_trans *trans,
1218        struct xfs_buf  *bp)
1219{
1220        xfs_attr_leafblock_t *leaf_s, *leaf_d;
1221        xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
1222        xfs_mount_t *mp;
1223        char *tmpbuffer;
1224
1225        mp = trans->t_mountp;
1226        tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
1227        ASSERT(tmpbuffer != NULL);
1228        memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
1229        memset(bp->b_addr, 0, XFS_LBSIZE(mp));
1230
1231        /*
1232         * Copy basic information
1233         */
1234        leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
1235        leaf_d = bp->b_addr;
1236        hdr_s = &leaf_s->hdr;
1237        hdr_d = &leaf_d->hdr;
1238        hdr_d->info = hdr_s->info;      /* struct copy */
1239        hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp));
1240        /* handle truncation gracefully */
1241        if (!hdr_d->firstused) {
1242                hdr_d->firstused = cpu_to_be16(
1243                                XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
1244        }
1245        hdr_d->usedbytes = 0;
1246        hdr_d->count = 0;
1247        hdr_d->holes = 0;
1248        hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
1249        hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) -
1250                                             sizeof(xfs_attr_leaf_hdr_t));
1251
1252        /*
1253         * Copy all entries in the same (sorted) order,
1254         * but allocate name/value pairs packed and in sequence.
1255         */
1256        xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
1257                                be16_to_cpu(hdr_s->count), mp);
1258        xfs_trans_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
1259
1260        kmem_free(tmpbuffer);
1261}
1262
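/*
 * A brief sketch of why compaction helps, with an illustrative helper
 * name: removes and dropped INCOMPLETE entries leave holes scattered
 * through the name/value region, so the total free space can be large
 * while no single freemap region is big enough.  Copying every live
 * entry into a zeroed buffer repacks the name/value data against the end
 * of the block and leaves exactly one contiguous region in freemap[0].
 */
static inline int
example_needs_compaction(int sum_free, int largest_free, int entsize)
{
        return largest_free < entsize && sum_free >= entsize;
}
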
1263/*
1264 * Redistribute the attribute list entries between two leaf nodes,
1265 * taking into account the size of the new entry.
1266 *
1267 * NOTE: if new block is empty, then it will get the upper half of the
1268 * old block.  At present, all (one) callers pass in an empty second block.
1269 *
1270 * This code adjusts the args->index/blkno and args->index2/blkno2 fields
1271 * to match what it is doing in splitting the attribute leaf block.  Those
1272 * values are used in "atomic rename" operations on attributes.  Note that
1273 * the "new" and "old" values can end up in different blocks.
1274 */
1275STATIC void
1276xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1277                                       xfs_da_state_blk_t *blk2)
1278{
1279        xfs_da_args_t *args;
1280        xfs_da_state_blk_t *tmp_blk;
1281        xfs_attr_leafblock_t *leaf1, *leaf2;
1282        xfs_attr_leaf_hdr_t *hdr1, *hdr2;
1283        int count, totallen, max, space, swap;
1284
1285        /*
1286         * Set up environment.
1287         */
1288        ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
1289        ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
1290        leaf1 = blk1->bp->b_addr;
1291        leaf2 = blk2->bp->b_addr;
1292        ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1293        ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1294        ASSERT(leaf2->hdr.count == 0);
1295        args = state->args;
1296
1297        trace_xfs_attr_leaf_rebalance(args);
1298
1299        /*
1300         * Check ordering of blocks, reverse if it makes things simpler.
1301         *
1302         * NOTE: Given that all (current) callers pass in an empty
1303         * second block, this code should never set "swap".
1304         */
1305        swap = 0;
1306        if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) {
1307                tmp_blk = blk1;
1308                blk1 = blk2;
1309                blk2 = tmp_blk;
1310                leaf1 = blk1->bp->b_addr;
1311                leaf2 = blk2->bp->b_addr;
1312                swap = 1;
1313        }
1314        hdr1 = &leaf1->hdr;
1315        hdr2 = &leaf2->hdr;
1316
1317        /*
1318         * Examine entries until we reduce the absolute difference in
1319         * byte usage between the two blocks to a minimum.  Then get
1320         * the direction to copy and the number of elements to move.
1321         *
1322         * "inleaf" is true if the new entry should be inserted into blk1.
1323         * If "swap" is also true, then reverse the sense of "inleaf".
1324         */
1325        state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2,
1326                                                            &count, &totallen);
1327        if (swap)
1328                state->inleaf = !state->inleaf;
1329
1330        /*
1331         * Move any entries required from leaf to leaf:
1332         */
1333        if (count < be16_to_cpu(hdr1->count)) {
1334                /*
1335                 * Figure the total bytes to be added to the destination leaf.
1336                 */
1337                /* number entries being moved */
1338                count = be16_to_cpu(hdr1->count) - count;
1339                space  = be16_to_cpu(hdr1->usedbytes) - totallen;
1340                space += count * sizeof(xfs_attr_leaf_entry_t);
1341
1342                /*
1343                 * leaf2 is the destination, compact it if it looks tight.
1344                 */
1345                max  = be16_to_cpu(hdr2->firstused)
1346                                                - sizeof(xfs_attr_leaf_hdr_t);
1347                max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t);
1348                if (space > max) {
1349                        xfs_attr_leaf_compact(args->trans, blk2->bp);
1350                }
1351
1352                /*
1353                 * Move high entries from leaf1 to low end of leaf2.
1354                 */
1355                xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
1356                                leaf2, 0, count, state->mp);
1357
1358                xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
1359                xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
1360        } else if (count > be16_to_cpu(hdr1->count)) {
1361                /*
1362                 * I assert that since all callers pass in an empty
1363                 * second buffer, this code should never execute.
1364                 */
1365                ASSERT(0);
1366
1367                /*
1368                 * Figure the total bytes to be added to the destination leaf.
1369                 */
1370                /* number entries being moved */
1371                count -= be16_to_cpu(hdr1->count);
1372                space  = totallen - be16_to_cpu(hdr1->usedbytes);
1373                space += count * sizeof(xfs_attr_leaf_entry_t);
1374
1375                /*
1376                 * leaf1 is the destination, compact it if it looks tight.
1377                 */
1378                max  = be16_to_cpu(hdr1->firstused)
1379                                                - sizeof(xfs_attr_leaf_hdr_t);
1380                max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t);
1381                if (space > max) {
1382                        xfs_attr_leaf_compact(args->trans, blk1->bp);
1383                }
1384
1385                /*
1386                 * Move low entries from leaf2 to high end of leaf1.
1387                 */
1388                xfs_attr_leaf_moveents(leaf2, 0, leaf1,
1389                                be16_to_cpu(hdr1->count), count, state->mp);
1390
1391                xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
1392                xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
1393        }
1394
1395        /*
1396         * Copy out last hashval in each block for B-tree code.
1397         */
1398        blk1->hashval = be32_to_cpu(
1399                leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval);
1400        blk2->hashval = be32_to_cpu(
1401                leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval);
1402
1403        /*
1404         * Adjust the expected index for insertion.
1405         * NOTE: this code depends on the (current) situation that the
1406         * second block was originally empty.
1407         *
1408         * If the insertion point moved to the 2nd block, we must adjust
1409         * the index.  We must also track the entry just following the
1410         * new entry for use in an "atomic rename" operation; that entry
1411         * is always the "old" entry and the "new" entry is what we are
1412         * inserting.  The index/blkno fields refer to the "old" entry,
1413         * while the index2/blkno2 fields refer to the "new" entry.
1414         */
1415        if (blk1->index > be16_to_cpu(leaf1->hdr.count)) {
1416                ASSERT(state->inleaf == 0);
1417                blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
1418                args->index = args->index2 = blk2->index;
1419                args->blkno = args->blkno2 = blk2->blkno;
1420        } else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) {
1421                if (state->inleaf) {
1422                        args->index = blk1->index;
1423                        args->blkno = blk1->blkno;
1424                        args->index2 = 0;
1425                        args->blkno2 = blk2->blkno;
1426                } else {
1427                        /*
1428                         * On a double leaf split, the original attr location
1429                         * is already stored in blkno2/index2, so don't
1430                         * overwrite it, otherwise we corrupt the tree.
1431                         */
1432                        blk2->index = blk1->index
1433                                    - be16_to_cpu(leaf1->hdr.count);
1434                        args->index = blk2->index;
1435                        args->blkno = blk2->blkno;
1436                        if (!state->extravalid) {
1437                                /*
1438                                 * set the new attr location to match the old
1439                                 * one and let the higher level split code
1440                                 * decide where in the leaf to place it.
1441                                 */
1442                                args->index2 = blk2->index;
1443                                args->blkno2 = blk2->blkno;
1444                        }
1445                }
1446        } else {
1447                ASSERT(state->inleaf == 1);
1448                args->index = args->index2 = blk1->index;
1449                args->blkno = args->blkno2 = blk1->blkno;
1450        }
1451}
1452
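#if 0
/*
 * Illustrative sketch only (not XFS code): a minimal model of the
 * insertion-index remapping done at the end of xfs_attr_leaf_rebalance()
 * above.  When the insertion point falls past the entries kept in leaf1,
 * it is re-expressed relative to leaf2; the boundary and "atomic rename"
 * cases in the real code are more involved.  The names ex_remap_index and
 * count1 are hypothetical and do not appear in XFS.
 */
static int ex_remap_index(int index, int count1 /* entries left in leaf1 */,
			  int *in_leaf2)
{
	if (index > count1) {
		*in_leaf2 = 1;
		return index - count1;	/* index within leaf2 */
	}
	*in_leaf2 = 0;
	return index;			/* index stays within leaf1 */
}
#endif
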
1453/*
1454 * Examine entries until we reduce the absolute difference in
1455 * byte usage between the two blocks to a minimum.
1456 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
1457 * GROT: there will always be enough room in either block for a new entry.
1458 * GROT: Do a double-split for this case?
1459 */
1460STATIC int
1461xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
1462                                    xfs_da_state_blk_t *blk1,
1463                                    xfs_da_state_blk_t *blk2,
1464                                    int *countarg, int *usedbytesarg)
1465{
1466        xfs_attr_leafblock_t *leaf1, *leaf2;
1467        xfs_attr_leaf_hdr_t *hdr1, *hdr2;
1468        xfs_attr_leaf_entry_t *entry;
1469        int count, max, index, totallen, half;
1470        int lastdelta, foundit, tmp;
1471
1472        /*
1473         * Set up environment.
1474         */
1475        leaf1 = blk1->bp->b_addr;
1476        leaf2 = blk2->bp->b_addr;
1477        hdr1 = &leaf1->hdr;
1478        hdr2 = &leaf2->hdr;
1479        foundit = 0;
1480        totallen = 0;
1481
1482        /*
1483         * Examine entries until we reduce the absolute difference in
1484         * byte usage between the two blocks to a minimum.
1485         */
1486        max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count);
1487        half  = (max+1) * sizeof(*entry);
1488        half += be16_to_cpu(hdr1->usedbytes) +
1489                be16_to_cpu(hdr2->usedbytes) +
1490                xfs_attr_leaf_newentsize(
1491                                state->args->namelen,
1492                                state->args->valuelen,
1493                                state->blocksize, NULL);
1494        half /= 2;
1495        lastdelta = state->blocksize;
1496        entry = &leaf1->entries[0];
1497        for (count = index = 0; count < max; entry++, index++, count++) {
1498
1499#define XFS_ATTR_ABS(A) (((A) < 0) ? -(A) : (A))
1500                /*
1501                 * The new entry is in the first block, account for it.
1502                 */
1503                if (count == blk1->index) {
1504                        tmp = totallen + sizeof(*entry) +
1505                                xfs_attr_leaf_newentsize(
1506                                                state->args->namelen,
1507                                                state->args->valuelen,
1508                                                state->blocksize, NULL);
1509                        if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1510                                break;
1511                        lastdelta = XFS_ATTR_ABS(half - tmp);
1512                        totallen = tmp;
1513                        foundit = 1;
1514                }
1515
1516                /*
1517                 * Wrap around into the second block if necessary.
1518                 */
1519                if (count == be16_to_cpu(hdr1->count)) {
1520                        leaf1 = leaf2;
1521                        entry = &leaf1->entries[0];
1522                        index = 0;
1523                }
1524
1525                /*
1526                 * Figure out if next leaf entry would be too much.
1527                 */
1528                tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
1529                                                                        index);
1530                if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1531                        break;
1532                lastdelta = XFS_ATTR_ABS(half - tmp);
1533                totallen = tmp;
1534#undef XFS_ATTR_ABS
1535        }
1536
1537        /*
1538         * Calculate the number of usedbytes that will end up in lower block.
1539         * If the new entry was counted into it, back its size out again.
1540         */
1541        totallen -= count * sizeof(*entry);
1542        if (foundit) {
1543                totallen -= sizeof(*entry) +
1544                                xfs_attr_leaf_newentsize(
1545                                                state->args->namelen,
1546                                                state->args->valuelen,
1547                                                state->blocksize, NULL);
1548        }
1549
1550        *countarg = count;
1551        *usedbytesarg = totallen;
1552        return(foundit);
1553}
1554
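#if 0
/*
 * Illustrative sketch only (not XFS code): a user-space model of the
 * split-point search performed by xfs_attr_leaf_figure_balance() above.
 * Walk the combined entries and stop as soon as taking one more entry
 * into the "lower" half would move the running byte total further from
 * the halfway mark.  Entry sizes are a plain int array here and all
 * names are hypothetical.
 */
#include <limits.h>
#include <stdlib.h>

static int ex_find_split(const int *entsize, int nentries, int half)
{
	int count, total = 0, lastdelta = INT_MAX;

	for (count = 0; count < nentries; count++) {
		int tmp = total + entsize[count];

		if (abs(half - tmp) > lastdelta)
			break;		/* getting worse; stop here */
		lastdelta = abs(half - tmp);
		total = tmp;
	}
	return count;	/* entries that stay in the lower block */
}
#endif
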
1555/*========================================================================
1556 * Routines used for shrinking the Btree.
1557 *========================================================================*/
1558
1559/*
1560 * Check a leaf block and its neighbors to see if the block should be
1561 * collapsed into one or the other neighbor.  Always keep the block
1562 * with the smaller block number.
1563 * If the current block is over 50% full, don't try to join it, return 0.
1564 * If the block is empty, fill in the state structure and return 2.
1565 * If it can be collapsed, fill in the state structure and return 1.
1566 * If nothing can be done, return 0.
1567 *
1568 * GROT: allow for INCOMPLETE entries in calculation.
1569 */
1570int
1571xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1572{
1573        xfs_attr_leafblock_t *leaf;
1574        xfs_da_state_blk_t *blk;
1575        xfs_da_blkinfo_t *info;
1576        int count, bytes, forward, error, retval, i;
1577        xfs_dablk_t blkno;
1578        struct xfs_buf *bp;
1579
1580        /*
1581         * Check for the degenerate case of the block being over 50% full.
1582         * If so, it's not worth even looking to see if we might be able
1583         * to coalesce with a sibling.
1584         */
1585        blk = &state->path.blk[ state->path.active-1 ];
1586        info = blk->bp->b_addr;
1587        ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1588        leaf = (xfs_attr_leafblock_t *)info;
1589        count = be16_to_cpu(leaf->hdr.count);
1590        bytes = sizeof(xfs_attr_leaf_hdr_t) +
1591                count * sizeof(xfs_attr_leaf_entry_t) +
1592                be16_to_cpu(leaf->hdr.usedbytes);
1593        if (bytes > (state->blocksize >> 1)) {
1594                *action = 0;    /* blk over 50%, don't try to join */
1595                return(0);
1596        }
1597
1598        /*
1599         * Check for the degenerate case of the block being empty.
1600         * If the block is empty, we'll simply delete it; no need to
1601         * coalesce it with a sibling block.  We choose (arbitrarily)
1602         * to merge with the forward block unless it is NULL.
1603         */
1604        if (count == 0) {
1605                /*
1606                 * Make altpath point to the block we want to keep and
1607                 * path point to the block we want to drop (this one).
1608                 */
1609                forward = (info->forw != 0);
1610                memcpy(&state->altpath, &state->path, sizeof(state->path));
1611                error = xfs_da_path_shift(state, &state->altpath, forward,
1612                                                 0, &retval);
1613                if (error)
1614                        return(error);
1615                if (retval) {
1616                        *action = 0;
1617                } else {
1618                        *action = 2;
1619                }
1620                return(0);
1621        }
1622
1623        /*
1624         * Examine each sibling block to see if we can coalesce with
1625         * at least 25% free space to spare.  We need to figure out
1626         * whether to merge with the forward or the backward block.
1627         * We prefer coalescing with the lower numbered sibling so as
1628         * to shrink an attribute list over time.
1629         */
1630        /* start with smaller blk num */
1631        forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
1632        for (i = 0; i < 2; forward = !forward, i++) {
1633                if (forward)
1634                        blkno = be32_to_cpu(info->forw);
1635                else
1636                        blkno = be32_to_cpu(info->back);
1637                if (blkno == 0)
1638                        continue;
1639                error = xfs_da_read_buf(state->args->trans, state->args->dp,
1640                                        blkno, -1, &bp, XFS_ATTR_FORK);
1641                if (error)
1642                        return(error);
1643                ASSERT(bp != NULL);
1644
1645                leaf = (xfs_attr_leafblock_t *)info;
1646                count  = be16_to_cpu(leaf->hdr.count);
1647                bytes  = state->blocksize - (state->blocksize>>2);
1648                bytes -= be16_to_cpu(leaf->hdr.usedbytes);
1649                leaf = bp->b_addr;
1650                ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1651                count += be16_to_cpu(leaf->hdr.count);
1652                bytes -= be16_to_cpu(leaf->hdr.usedbytes);
1653                bytes -= count * sizeof(xfs_attr_leaf_entry_t);
1654                bytes -= sizeof(xfs_attr_leaf_hdr_t);
1655                xfs_trans_brelse(state->args->trans, bp);
1656                if (bytes >= 0)
1657                        break;  /* fits with at least 25% to spare */
1658        }
1659        if (i >= 2) {
1660                *action = 0;
1661                return(0);
1662        }
1663
1664        /*
1665         * Make altpath point to the block we want to keep (the lower
1666         * numbered block) and path point to the block we want to drop.
1667         */
1668        memcpy(&state->altpath, &state->path, sizeof(state->path));
1669        if (blkno < blk->blkno) {
1670                error = xfs_da_path_shift(state, &state->altpath, forward,
1671                                                 0, &retval);
1672        } else {
1673                error = xfs_da_path_shift(state, &state->path, forward,
1674                                                 0, &retval);
1675        }
1676        if (error)
1677                return(error);
1678        if (retval) {
1679                *action = 0;
1680        } else {
1681                *action = 1;
1682        }
1683        return(0);
1684}
1685
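#if 0
/*
 * Illustrative sketch only (not XFS code): the "25% to spare" test from
 * xfs_attr_leaf_toosmall() above, restated as a standalone predicate.
 * Two leaves may be joined only if one block header plus both entry
 * tables plus both blocks' name/value bytes fit in 75% of a block.
 * All names are hypothetical.
 */
static int ex_leaves_can_join(int blocksize, int hdrsize, int entrysize,
			      int count1, int used1, int count2, int used2)
{
	int bytes = blocksize - (blocksize >> 2);	/* 75% of a block */

	bytes -= hdrsize;
	bytes -= (count1 + count2) * entrysize;
	bytes -= used1 + used2;
	return bytes >= 0;		/* fits with at least 25% free */
}
#endif
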
1686/*
1687 * Remove a name from the leaf attribute list structure.
1688 *
1689 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
1690 * If two leaves are 37% full, when combined they will leave 25% free.
1691 */
1692int
1693xfs_attr_leaf_remove(
1694        struct xfs_buf  *bp,
1695        xfs_da_args_t   *args)
1696{
1697        xfs_attr_leafblock_t *leaf;
1698        xfs_attr_leaf_hdr_t *hdr;
1699        xfs_attr_leaf_map_t *map;
1700        xfs_attr_leaf_entry_t *entry;
1701        int before, after, smallest, entsize;
1702        int tablesize, tmp, i;
1703        xfs_mount_t *mp;
1704
1705        leaf = bp->b_addr;
1706        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1707        hdr = &leaf->hdr;
1708        mp = args->trans->t_mountp;
1709        ASSERT((be16_to_cpu(hdr->count) > 0)
1710                && (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8)));
1711        ASSERT((args->index >= 0)
1712                && (args->index < be16_to_cpu(hdr->count)));
1713        ASSERT(be16_to_cpu(hdr->firstused) >=
1714               ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
1715        entry = &leaf->entries[args->index];
1716        ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused));
1717        ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
1718
1719        /*
1720         * Scan through free region table:
1721         *    check for adjacency of free'd entry with an existing one,
1722         *    find smallest free region in case we need to replace it,
1723         *    adjust any map that borders the entry table.
1724         */
1725        tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t)
1726                                        + sizeof(xfs_attr_leaf_hdr_t);
1727        map = &hdr->freemap[0];
1728        tmp = be16_to_cpu(map->size);
1729        before = after = -1;
1730        smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
1731        entsize = xfs_attr_leaf_entsize(leaf, args->index);
1732        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
1733                ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
1734                ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
1735                if (be16_to_cpu(map->base) == tablesize) {
1736                        be16_add_cpu(&map->base,
1737                                 -((int)sizeof(xfs_attr_leaf_entry_t)));
1738                        be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t));
1739                }
1740
1741                if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
1742                                == be16_to_cpu(entry->nameidx)) {
1743                        before = i;
1744                } else if (be16_to_cpu(map->base)
1745                        == (be16_to_cpu(entry->nameidx) + entsize)) {
1746                        after = i;
1747                } else if (be16_to_cpu(map->size) < tmp) {
1748                        tmp = be16_to_cpu(map->size);
1749                        smallest = i;
1750                }
1751        }
1752
1753        /*
1754         * Coalesce adjacent freemap regions,
1755         * or replace the smallest region.
1756         */
1757        if ((before >= 0) || (after >= 0)) {
1758                if ((before >= 0) && (after >= 0)) {
1759                        map = &hdr->freemap[before];
1760                        be16_add_cpu(&map->size, entsize);
1761                        be16_add_cpu(&map->size,
1762                                 be16_to_cpu(hdr->freemap[after].size));
1763                        hdr->freemap[after].base = 0;
1764                        hdr->freemap[after].size = 0;
1765                } else if (before >= 0) {
1766                        map = &hdr->freemap[before];
1767                        be16_add_cpu(&map->size, entsize);
1768                } else {
1769                        map = &hdr->freemap[after];
1770                        /* both on-disk, don't endian flip twice */
1771                        map->base = entry->nameidx;
1772                        be16_add_cpu(&map->size, entsize);
1773                }
1774        } else {
1775                /*
1776                 * Replace smallest region (if it is smaller than free'd entry)
1777                 */
1778                map = &hdr->freemap[smallest];
1779                if (be16_to_cpu(map->size) < entsize) {
1780                        map->base = cpu_to_be16(be16_to_cpu(entry->nameidx));
1781                        map->size = cpu_to_be16(entsize);
1782                }
1783        }
1784
1785        /*
1786         * Did we remove the first entry?
1787         */
1788        if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused))
1789                smallest = 1;
1790        else
1791                smallest = 0;
1792
1793        /*
1794         * Compress the remaining entries and zero out the removed stuff.
1795         */
1796        memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize);
1797        be16_add_cpu(&hdr->usedbytes, -entsize);
1798        xfs_trans_log_buf(args->trans, bp,
1799             XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
1800                                   entsize));
1801
1802        tmp = (be16_to_cpu(hdr->count) - args->index)
1803                                        * sizeof(xfs_attr_leaf_entry_t);
1804        memmove((char *)entry, (char *)(entry+1), tmp);
1805        be16_add_cpu(&hdr->count, -1);
1806        xfs_trans_log_buf(args->trans, bp,
1807            XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1808        entry = &leaf->entries[be16_to_cpu(hdr->count)];
1809        memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
1810
1811        /*
1812         * If we removed the first entry, re-find the first used byte
1813         * in the name area.  Note that if the entry was the "firstused",
1814         * then we don't have a "hole" in our block resulting from
1815         * removing the name.
1816         */
1817        if (smallest) {
1818                tmp = XFS_LBSIZE(mp);
1819                entry = &leaf->entries[0];
1820                for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) {
1821                        ASSERT(be16_to_cpu(entry->nameidx) >=
1822                               be16_to_cpu(hdr->firstused));
1823                        ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
1824
1825                        if (be16_to_cpu(entry->nameidx) < tmp)
1826                                tmp = be16_to_cpu(entry->nameidx);
1827                }
1828                hdr->firstused = cpu_to_be16(tmp);
1829                if (!hdr->firstused) {
1830                        hdr->firstused = cpu_to_be16(
1831                                        tmp - XFS_ATTR_LEAF_NAME_ALIGN);
1832                }
1833        } else {
1834                hdr->holes = 1;         /* mark as needing compaction */
1835        }
1836        xfs_trans_log_buf(args->trans, bp,
1837                          XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
1838
1839        /*
1840         * Check if the leaf is less than 37% full (m_attr_magicpct); if
1841         * so, the caller may want to "join" the leaf with a sibling.
1842         */
1843        tmp  = sizeof(xfs_attr_leaf_hdr_t);
1844        tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t);
1845        tmp += be16_to_cpu(leaf->hdr.usedbytes);
1846        return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */
1847}
1848
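#if 0
/*
 * Illustrative sketch only (not XFS code): the freemap handling in
 * xfs_attr_leaf_remove() above, in miniature.  When an entry's name/value
 * region is freed, try to glue it onto an adjacent free region first; the
 * real code also merges two maps when the freed range touches both, and
 * otherwise considers replacing the smallest map.  The struct and names
 * below are hypothetical stand-ins for the three-slot on-disk freemap.
 */
struct ex_map { int base; int size; };

static int ex_return_free_region(struct ex_map *map, int nmaps,
				 int freed_base, int freed_size)
{
	int i;

	for (i = 0; i < nmaps; i++) {
		if (map[i].base + map[i].size == freed_base) {
			map[i].size += freed_size;	/* freed follows map */
			return 1;
		}
		if (freed_base + freed_size == map[i].base) {
			map[i].base = freed_base;	/* freed precedes map */
			map[i].size += freed_size;
			return 1;
		}
	}
	return 0;	/* no neighbour; caller may replace the smallest map */
}
#endif
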
1849/*
1850 * Move all the attribute list entries from drop_leaf into save_leaf.
1851 */
1852void
1853xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1854                                       xfs_da_state_blk_t *save_blk)
1855{
1856        xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
1857        xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
1858        xfs_mount_t *mp;
1859        char *tmpbuffer;
1860
1861        trace_xfs_attr_leaf_unbalance(state->args);
1862
1863        /*
1864         * Set up environment.
1865         */
1866        mp = state->mp;
1867        ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
1868        ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1869        drop_leaf = drop_blk->bp->b_addr;
1870        save_leaf = save_blk->bp->b_addr;
1871        ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1872        ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1873        drop_hdr = &drop_leaf->hdr;
1874        save_hdr = &save_leaf->hdr;
1875
1876        /*
1877         * Save last hashval from dying block for later Btree fixup.
1878         */
1879        drop_blk->hashval = be32_to_cpu(
1880                drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval);
1881
1882        /*
1883         * Check if we need a temp buffer, or whether we can do it in place.
1884         * Note that we don't check "leaf" for holes because we will
1885         * always be dropping it; toosmall() decided that for us already.
1886         */
1887        if (save_hdr->holes == 0) {
1888                /*
1889                 * dest leaf has no holes, so we add there.  May need
1890                 * to make some room in the entry array.
1891                 */
1892                if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
1893                        xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0,
1894                             be16_to_cpu(drop_hdr->count), mp);
1895                } else {
1896                        xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf,
1897                                  be16_to_cpu(save_hdr->count),
1898                                  be16_to_cpu(drop_hdr->count), mp);
1899                }
1900        } else {
1901                /*
1902                 * Destination has holes, so we make a temporary copy
1903                 * of the leaf and add them both to that.
1904                 */
1905                tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
1906                ASSERT(tmpbuffer != NULL);
1907                memset(tmpbuffer, 0, state->blocksize);
1908                tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer;
1909                tmp_hdr = &tmp_leaf->hdr;
1910                tmp_hdr->info = save_hdr->info; /* struct copy */
1911                tmp_hdr->count = 0;
1912                tmp_hdr->firstused = cpu_to_be16(state->blocksize);
1913                if (!tmp_hdr->firstused) {
1914                        tmp_hdr->firstused = cpu_to_be16(
1915                                state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN);
1916                }
1917                tmp_hdr->usedbytes = 0;
1918                if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
1919                        xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
1920                                be16_to_cpu(drop_hdr->count), mp);
1921                        xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf,
1922                                  be16_to_cpu(tmp_leaf->hdr.count),
1923                                  be16_to_cpu(save_hdr->count), mp);
1924                } else {
1925                        xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
1926                                be16_to_cpu(save_hdr->count), mp);
1927                        xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf,
1928                                be16_to_cpu(tmp_leaf->hdr.count),
1929                                be16_to_cpu(drop_hdr->count), mp);
1930                }
1931                memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
1932                kmem_free(tmpbuffer);
1933        }
1934
1935        xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
1936                                           state->blocksize - 1);
1937
1938        /*
1939         * Copy out last hashval in each block for B-tree code.
1940         */
1941        save_blk->hashval = be32_to_cpu(
1942                save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval);
1943}
1944
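#if 0
/*
 * Illustrative sketch only (not XFS code): why xfs_attr_leaf_unbalance()
 * above sometimes detours through a temporary buffer.  When the
 * destination leaf already has holes, entries cannot simply be appended
 * in place, so both leaves are re-packed into a zeroed scratch block
 * which then overwrites the destination.  This is a user-space
 * abstraction; the pack callback and all names are hypothetical.
 */
#include <stdlib.h>

static void *ex_merge_via_scratch(const void *save, const void *drop,
				  size_t blocksize,
				  void (*pack)(void *dst, const void *src))
{
	void *tmp = calloc(1, blocksize);	/* zeroed scratch leaf */

	if (!tmp)
		return NULL;
	pack(tmp, save);	/* re-pack both source leaves, compactly */
	pack(tmp, drop);
	return tmp;		/* caller copies this back over "save" */
}
#endif
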
1945/*========================================================================
1946 * Routines used for finding things in the Btree.
1947 *========================================================================*/
1948
1949/*
1950 * Look up a name in a leaf attribute list structure.
1951 * This is the internal routine, it uses the caller's buffer.
1952 *
 1953 * Note that duplicate keys are allowed, but we only check within the
1954 * current leaf node.  The Btree code must check in adjacent leaf nodes.
1955 *
1956 * Return in args->index the index into the entry[] array of either
1957 * the found entry, or where the entry should have been (insert before
1958 * that entry).
1959 *
1960 * Don't change the args->value unless we find the attribute.
1961 */
1962int
1963xfs_attr_leaf_lookup_int(
1964        struct xfs_buf  *bp,
1965        xfs_da_args_t   *args)
1966{
1967        xfs_attr_leafblock_t *leaf;
1968        xfs_attr_leaf_entry_t *entry;
1969        xfs_attr_leaf_name_local_t *name_loc;
1970        xfs_attr_leaf_name_remote_t *name_rmt;
1971        int probe, span;
1972        xfs_dahash_t hashval;
1973
1974        trace_xfs_attr_leaf_lookup(args);
1975
1976        leaf = bp->b_addr;
1977        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1978        ASSERT(be16_to_cpu(leaf->hdr.count)
1979                                        < (XFS_LBSIZE(args->dp->i_mount)/8));
1980
1981        /*
1982         * Binary search.  (note: small blocks will skip this loop)
1983         */
1984        hashval = args->hashval;
1985        probe = span = be16_to_cpu(leaf->hdr.count) / 2;
1986        for (entry = &leaf->entries[probe]; span > 4;
1987                   entry = &leaf->entries[probe]) {
1988                span /= 2;
1989                if (be32_to_cpu(entry->hashval) < hashval)
1990                        probe += span;
1991                else if (be32_to_cpu(entry->hashval) > hashval)
1992                        probe -= span;
1993                else
1994                        break;
1995        }
1996        ASSERT((probe >= 0) &&
1997               (!leaf->hdr.count
1998               || (probe < be16_to_cpu(leaf->hdr.count))));
1999        ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval));
2000
2001        /*
2002         * Since we may have duplicate hashvals, find the first matching
2003         * hashval in the leaf.
2004         */
2005        while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) {
2006                entry--;
2007                probe--;
2008        }
2009        while ((probe < be16_to_cpu(leaf->hdr.count)) &&
2010               (be32_to_cpu(entry->hashval) < hashval)) {
2011                entry++;
2012                probe++;
2013        }
2014        if ((probe == be16_to_cpu(leaf->hdr.count)) ||
2015            (be32_to_cpu(entry->hashval) != hashval)) {
2016                args->index = probe;
2017                return(XFS_ERROR(ENOATTR));
2018        }
2019
2020        /*
2021         * Duplicate keys may be present, so search all of them for a match.
2022         */
2023        for (  ; (probe < be16_to_cpu(leaf->hdr.count)) &&
2024                        (be32_to_cpu(entry->hashval) == hashval);
2025                        entry++, probe++) {
2026/*
2027 * GROT: Add code to remove incomplete entries.
2028 */
2029                /*
2030                 * If we are looking for INCOMPLETE entries, show only those.
2031                 * If we are looking for complete entries, show only those.
2032                 */
2033                if ((args->flags & XFS_ATTR_INCOMPLETE) !=
2034                    (entry->flags & XFS_ATTR_INCOMPLETE)) {
2035                        continue;
2036                }
2037                if (entry->flags & XFS_ATTR_LOCAL) {
2038                        name_loc = xfs_attr_leaf_name_local(leaf, probe);
2039                        if (name_loc->namelen != args->namelen)
2040                                continue;
2041                        if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0)
2042                                continue;
2043                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
2044                                continue;
2045                        args->index = probe;
2046                        return(XFS_ERROR(EEXIST));
2047                } else {
2048                        name_rmt = xfs_attr_leaf_name_remote(leaf, probe);
2049                        if (name_rmt->namelen != args->namelen)
2050                                continue;
2051                        if (memcmp(args->name, (char *)name_rmt->name,
2052                                             args->namelen) != 0)
2053                                continue;
2054                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
2055                                continue;
2056                        args->index = probe;
2057                        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2058                        args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
2059                                                   be32_to_cpu(name_rmt->valuelen));
2060                        return(XFS_ERROR(EEXIST));
2061                }
2062        }
2063        args->index = probe;
2064        return(XFS_ERROR(ENOATTR));
2065}
2066
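#if 0
/*
 * Illustrative sketch only (not XFS code): the probe strategy of
 * xfs_attr_leaf_lookup_int() above, modelled over a sorted array of hash
 * values.  Narrow down with a binary search until the span is small, then
 * walk back to the first entry carrying the wanted hash so that every
 * duplicate gets examined.  All names are hypothetical.
 */
static int ex_find_first_hash(const unsigned int *hashes, int count,
			      unsigned int want)
{
	int probe = count / 2, span = count / 2;

	while (span > 4) {
		span /= 2;
		if (hashes[probe] < want)
			probe += span;
		else if (hashes[probe] > want)
			probe -= span;
		else
			break;
	}
	while (probe > 0 && hashes[probe] >= want)
		probe--;		/* back up past any duplicates */
	while (probe < count && hashes[probe] < want)
		probe++;		/* forward to the first candidate */
	return probe;	/* index of first entry with hash >= want */
}
#endif
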
2067/*
2068 * Get the value associated with an attribute name from a leaf attribute
2069 * list structure.
2070 */
2071int
2072xfs_attr_leaf_getvalue(
2073        struct xfs_buf  *bp,
2074        xfs_da_args_t   *args)
2075{
2076        int valuelen;
2077        xfs_attr_leafblock_t *leaf;
2078        xfs_attr_leaf_entry_t *entry;
2079        xfs_attr_leaf_name_local_t *name_loc;
2080        xfs_attr_leaf_name_remote_t *name_rmt;
2081
2082        leaf = bp->b_addr;
2083        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2084        ASSERT(be16_to_cpu(leaf->hdr.count)
2085                                        < (XFS_LBSIZE(args->dp->i_mount)/8));
2086        ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2087
2088        entry = &leaf->entries[args->index];
2089        if (entry->flags & XFS_ATTR_LOCAL) {
2090                name_loc = xfs_attr_leaf_name_local(leaf, args->index);
2091                ASSERT(name_loc->namelen == args->namelen);
2092                ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
2093                valuelen = be16_to_cpu(name_loc->valuelen);
2094                if (args->flags & ATTR_KERNOVAL) {
2095                        args->valuelen = valuelen;
2096                        return(0);
2097                }
2098                if (args->valuelen < valuelen) {
2099                        args->valuelen = valuelen;
2100                        return(XFS_ERROR(ERANGE));
2101                }
2102                args->valuelen = valuelen;
2103                memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
2104        } else {
2105                name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2106                ASSERT(name_rmt->namelen == args->namelen);
2107                ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
2108                valuelen = be32_to_cpu(name_rmt->valuelen);
2109                args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2110                args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
2111                if (args->flags & ATTR_KERNOVAL) {
2112                        args->valuelen = valuelen;
2113                        return(0);
2114                }
2115                if (args->valuelen < valuelen) {
2116                        args->valuelen = valuelen;
2117                        return(XFS_ERROR(ERANGE));
2118                }
2119                args->valuelen = valuelen;
2120        }
2121        return(0);
2122}
2123
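#if 0
/*
 * Illustrative sketch only (not XFS code): the buffer-size convention
 * implemented by xfs_attr_leaf_getvalue() above, reduced to a standalone
 * helper.  The length argument is in/out: a pure size query
 * (ATTR_KERNOVAL, modelled here by a NULL buffer) just reports the size,
 * and a too-small buffer gets ERANGE back together with the size
 * actually required.  All names are hypothetical.
 */
#include <errno.h>
#include <string.h>

static int ex_copy_value(const char *value, int valuelen,
			 char *buf, int *buflen)
{
	if (buf == NULL) {		/* size query only */
		*buflen = valuelen;
		return 0;
	}
	if (*buflen < valuelen) {
		*buflen = valuelen;	/* tell the caller what is needed */
		return ERANGE;
	}
	*buflen = valuelen;
	memcpy(buf, value, valuelen);
	return 0;
}
#endif
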
2124/*========================================================================
2125 * Utility routines.
2126 *========================================================================*/
2127
2128/*
2129 * Move the indicated entries from one leaf to another.
2130 * NOTE: this routine modifies both source and destination leaves.
2131 */
2132/*ARGSUSED*/
2133STATIC void
2134xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
2135                        xfs_attr_leafblock_t *leaf_d, int start_d,
2136                        int count, xfs_mount_t *mp)
2137{
2138        xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
2139        xfs_attr_leaf_entry_t *entry_s, *entry_d;
2140        int desti, tmp, i;
2141
2142        /*
2143         * Check for nothing to do.
2144         */
2145        if (count == 0)
2146                return;
2147
2148        /*
2149         * Set up environment.
2150         */
2151        ASSERT(leaf_s->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2152        ASSERT(leaf_d->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2153        hdr_s = &leaf_s->hdr;
2154        hdr_d = &leaf_d->hdr;
2155        ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
2156               (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8)));
2157        ASSERT(be16_to_cpu(hdr_s->firstused) >=
2158                ((be16_to_cpu(hdr_s->count)
2159                                        * sizeof(*entry_s))+sizeof(*hdr_s)));
2160        ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8));
2161        ASSERT(be16_to_cpu(hdr_d->firstused) >=
2162                ((be16_to_cpu(hdr_d->count)
2163                                        * sizeof(*entry_d))+sizeof(*hdr_d)));
2164
2165        ASSERT(start_s < be16_to_cpu(hdr_s->count));
2166        ASSERT(start_d <= be16_to_cpu(hdr_d->count));
2167        ASSERT(count <= be16_to_cpu(hdr_s->count));
2168
2169        /*
2170         * Move the entries in the destination leaf up to make a hole, if needed.
2171         */
2172        if (start_d < be16_to_cpu(hdr_d->count)) {
2173                tmp  = be16_to_cpu(hdr_d->count) - start_d;
2174                tmp *= sizeof(xfs_attr_leaf_entry_t);
2175                entry_s = &leaf_d->entries[start_d];
2176                entry_d = &leaf_d->entries[start_d + count];
2177                memmove((char *)entry_d, (char *)entry_s, tmp);
2178        }
2179
2180        /*
2181         * Copy all entries in the same (sorted) order,
2182         * but allocate attribute info packed and in sequence.
2183         */
2184        entry_s = &leaf_s->entries[start_s];
2185        entry_d = &leaf_d->entries[start_d];
2186        desti = start_d;
2187        for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
2188                ASSERT(be16_to_cpu(entry_s->nameidx)
2189                                >= be16_to_cpu(hdr_s->firstused));
2190                tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
2191#ifdef GROT
2192                /*
2193                 * Code to drop INCOMPLETE entries.  Difficult to use as we
2194                 * may also need to change the insertion index.  Code turned
2195                 * off for 6.2; should be revisited later.
2196                 */
2197                if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
2198                        memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
2199                        be16_add_cpu(&hdr_s->usedbytes, -tmp);
2200                        be16_add_cpu(&hdr_s->count, -1);
2201                        entry_d--;      /* to compensate for ++ in loop hdr */
2202                        desti--;
2203                        if ((start_s + i) < offset)
2204                                result++;       /* insertion index adjustment */
2205                } else {
2206#endif /* GROT */
2207                        be16_add_cpu(&hdr_d->firstused, -tmp);
2208                        /* both on-disk, don't endian flip twice */
2209                        entry_d->hashval = entry_s->hashval;
2210                        /* both on-disk, don't endian flip twice */
2211                        entry_d->nameidx = hdr_d->firstused;
2212                        entry_d->flags = entry_s->flags;
2213                        ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
2214                                                        <= XFS_LBSIZE(mp));
2215                        memmove(xfs_attr_leaf_name(leaf_d, desti),
2216                                xfs_attr_leaf_name(leaf_s, start_s + i), tmp);
2217                        ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
2218                                                        <= XFS_LBSIZE(mp));
2219                        memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
2220                        be16_add_cpu(&hdr_s->usedbytes, -tmp);
2221                        be16_add_cpu(&hdr_d->usedbytes, tmp);
2222                        be16_add_cpu(&hdr_s->count, -1);
2223                        be16_add_cpu(&hdr_d->count, 1);
2224                        tmp = be16_to_cpu(hdr_d->count)
2225                                                * sizeof(xfs_attr_leaf_entry_t)
2226                                                + sizeof(xfs_attr_leaf_hdr_t);
2227                        ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp);
2228#ifdef GROT
2229                }
2230#endif /* GROT */
2231        }
2232
2233        /*
2234         * Zero out the entries we just copied.
2235         */
2236        if (start_s == be16_to_cpu(hdr_s->count)) {
2237                tmp = count * sizeof(xfs_attr_leaf_entry_t);
2238                entry_s = &leaf_s->entries[start_s];
2239                ASSERT(((char *)entry_s + tmp) <=
2240                       ((char *)leaf_s + XFS_LBSIZE(mp)));
2241                memset((char *)entry_s, 0, tmp);
2242        } else {
2243                /*
2244                 * Move the remaining entries down to fill the hole,
2245                 * then zero the entries at the top.
2246                 */
2247                tmp  = be16_to_cpu(hdr_s->count) - count;
2248                tmp *= sizeof(xfs_attr_leaf_entry_t);
2249                entry_s = &leaf_s->entries[start_s + count];
2250                entry_d = &leaf_s->entries[start_s];
2251                memmove((char *)entry_d, (char *)entry_s, tmp);
2252
2253                tmp = count * sizeof(xfs_attr_leaf_entry_t);
2254                entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)];
2255                ASSERT(((char *)entry_s + tmp) <=
2256                       ((char *)leaf_s + XFS_LBSIZE(mp)));
2257                memset((char *)entry_s, 0, tmp);
2258        }
2259
2260        /*
2261         * Fill in the freemap information
2262         */
2263        hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
2264        be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
2265                        sizeof(xfs_attr_leaf_entry_t));
2266        hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
2267                              - be16_to_cpu(hdr_d->freemap[0].base));
2268        hdr_d->freemap[1].base = 0;
2269        hdr_d->freemap[2].base = 0;
2270        hdr_d->freemap[1].size = 0;
2271        hdr_d->freemap[2].size = 0;
2272        hdr_s->holes = 1;       /* leaf may not be compact */
2273}
2274
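#if 0
/*
 * Illustrative sketch only (not XFS code): the per-entry destination
 * bookkeeping of xfs_attr_leaf_moveents() above.  The entry table grows
 * up from the header while name/value bytes are packed down from
 * "firstused", so each copied entry lowers firstused by its size and
 * that offset becomes its nameidx.  The struct and names are
 * hypothetical.
 */
struct ex_dst { int count; int firstused; int usedbytes; };

static int ex_place_one(struct ex_dst *d, int entsize)
{
	d->firstused -= entsize;	/* name/value packed at the high end */
	d->usedbytes += entsize;
	d->count++;			/* entry table grows at the low end */
	return d->firstused;		/* new entry's nameidx */
}
#endif
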
2275/*
 2276 * Compare the "order" of two leaf blocks.
2277 * Return 0 unless leaf2 should go before leaf1.
2278 */
2279int
2280xfs_attr_leaf_order(
2281        struct xfs_buf  *leaf1_bp,
2282        struct xfs_buf  *leaf2_bp)
2283{
2284        xfs_attr_leafblock_t *leaf1, *leaf2;
2285
2286        leaf1 = leaf1_bp->b_addr;
2287        leaf2 = leaf2_bp->b_addr;
2288        ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) &&
2289               (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)));
2290        if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
2291            (be16_to_cpu(leaf2->hdr.count) > 0) &&
2292            ((be32_to_cpu(leaf2->entries[0].hashval) <
2293              be32_to_cpu(leaf1->entries[0].hashval)) ||
2294             (be32_to_cpu(leaf2->entries[
2295                        be16_to_cpu(leaf2->hdr.count)-1].hashval) <
2296              be32_to_cpu(leaf1->entries[
2297                        be16_to_cpu(leaf1->hdr.count)-1].hashval)))) {
2298                return(1);
2299        }
2300        return(0);
2301}
2302
2303/*
2304 * Pick up the last hashvalue from a leaf block.
2305 */
2306xfs_dahash_t
2307xfs_attr_leaf_lasthash(
2308        struct xfs_buf  *bp,
2309        int             *count)
2310{
2311        xfs_attr_leafblock_t *leaf;
2312
2313        leaf = bp->b_addr;
2314        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2315        if (count)
2316                *count = be16_to_cpu(leaf->hdr.count);
2317        if (!leaf->hdr.count)
2318                return(0);
2319        return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval);
2320}
2321
2322/*
2323 * Calculate the number of bytes used to store the indicated attribute
 2324 * (whether local or remote, only calculate bytes in this block).
2325 */
2326STATIC int
2327xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
2328{
2329        xfs_attr_leaf_name_local_t *name_loc;
2330        xfs_attr_leaf_name_remote_t *name_rmt;
2331        int size;
2332
2333        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2334        if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
2335                name_loc = xfs_attr_leaf_name_local(leaf, index);
2336                size = xfs_attr_leaf_entsize_local(name_loc->namelen,
2337                                                   be16_to_cpu(name_loc->valuelen));
2338        } else {
2339                name_rmt = xfs_attr_leaf_name_remote(leaf, index);
2340                size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
2341        }
2342        return(size);
2343}
2344
2345/*
2346 * Calculate the number of bytes that would be required to store the new
 2347 * attribute (whether local or remote, only calculate bytes in this block).
2348 * This routine decides as a side effect whether the attribute will be
2349 * a "local" or a "remote" attribute.
2350 */
2351int
2352xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
2353{
2354        int size;
2355
2356        size = xfs_attr_leaf_entsize_local(namelen, valuelen);
2357        if (size < xfs_attr_leaf_entsize_local_max(blocksize)) {
2358                if (local) {
2359                        *local = 1;
2360                }
2361        } else {
2362                size = xfs_attr_leaf_entsize_remote(namelen);
2363                if (local) {
2364                        *local = 0;
2365                }
2366        }
2367        return(size);
2368}
2369
2370/*
2371 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
2372 */
2373int
2374xfs_attr_leaf_list_int(
2375        struct xfs_buf          *bp,
2376        xfs_attr_list_context_t *context)
2377{
2378        attrlist_cursor_kern_t *cursor;
2379        xfs_attr_leafblock_t *leaf;
2380        xfs_attr_leaf_entry_t *entry;
2381        int retval, i;
2382
2383        ASSERT(bp != NULL);
2384        leaf = bp->b_addr;
2385        cursor = context->cursor;
2386        cursor->initted = 1;
2387
2388        trace_xfs_attr_list_leaf(context);
2389
2390        /*
2391         * Re-find our place in the leaf block if this is a new syscall.
2392         */
2393        if (context->resynch) {
2394                entry = &leaf->entries[0];
2395                for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
2396                        if (be32_to_cpu(entry->hashval) == cursor->hashval) {
2397                                if (cursor->offset == context->dupcnt) {
2398                                        context->dupcnt = 0;
2399                                        break;
2400                                }
2401                                context->dupcnt++;
2402                        } else if (be32_to_cpu(entry->hashval) >
2403                                        cursor->hashval) {
2404                                context->dupcnt = 0;
2405                                break;
2406                        }
2407                }
2408                if (i == be16_to_cpu(leaf->hdr.count)) {
2409                        trace_xfs_attr_list_notfound(context);
2410                        return(0);
2411                }
2412        } else {
2413                entry = &leaf->entries[0];
2414                i = 0;
2415        }
2416        context->resynch = 0;
2417
2418        /*
2419         * We have found our place; start copying out the new attributes.
2420         */
2421        retval = 0;
2422        for (  ; (i < be16_to_cpu(leaf->hdr.count)); entry++, i++) {
2423                if (be32_to_cpu(entry->hashval) != cursor->hashval) {
2424                        cursor->hashval = be32_to_cpu(entry->hashval);
2425                        cursor->offset = 0;
2426                }
2427
2428                if (entry->flags & XFS_ATTR_INCOMPLETE)
2429                        continue;               /* skip incomplete entries */
2430
2431                if (entry->flags & XFS_ATTR_LOCAL) {
2432                        xfs_attr_leaf_name_local_t *name_loc =
2433                                xfs_attr_leaf_name_local(leaf, i);
2434
2435                        retval = context->put_listent(context,
2436                                                entry->flags,
2437                                                name_loc->nameval,
2438                                                (int)name_loc->namelen,
2439                                                be16_to_cpu(name_loc->valuelen),
2440                                                &name_loc->nameval[name_loc->namelen]);
2441                        if (retval)
2442                                return retval;
2443                } else {
2444                        xfs_attr_leaf_name_remote_t *name_rmt =
2445                                xfs_attr_leaf_name_remote(leaf, i);
2446
2447                        int valuelen = be32_to_cpu(name_rmt->valuelen);
2448
2449                        if (context->put_value) {
2450                                xfs_da_args_t args;
2451
2452                                memset((char *)&args, 0, sizeof(args));
2453                                args.dp = context->dp;
2454                                args.whichfork = XFS_ATTR_FORK;
2455                                args.valuelen = valuelen;
2456                                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
2457                                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
2458                                args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
2459                                retval = xfs_attr_rmtval_get(&args);
2460                                if (retval)
2461                                        return retval;
2462                                retval = context->put_listent(context,
2463                                                entry->flags,
2464                                                name_rmt->name,
2465                                                (int)name_rmt->namelen,
2466                                                valuelen,
2467                                                args.value);
2468                                kmem_free(args.value);
2469                        } else {
2470                                retval = context->put_listent(context,
2471                                                entry->flags,
2472                                                name_rmt->name,
2473                                                (int)name_rmt->namelen,
2474                                                valuelen,
2475                                                NULL);
2476                        }
2477                        if (retval)
2478                                return retval;
2479                }
2480                if (context->seen_enough)
2481                        break;
2482                cursor->offset++;
2483        }
2484        trace_xfs_attr_list_leaf_end(context);
2485        return(retval);
2486}
2487
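#if 0
/*
 * Illustrative sketch only (not XFS code): the cursor resynch logic of
 * xfs_attr_leaf_list_int() above, over a sorted array of hash values.
 * Because hashes may repeat, the cursor is a (hashval, offset) pair: we
 * resume at the offset'th entry carrying that hash, or at the next
 * larger hash if the remembered entry has gone away.  All names are
 * hypothetical.
 */
static int ex_resynch(const unsigned int *hashes, int count,
		      unsigned int cur_hash, int cur_offset)
{
	int i, dup = 0;

	for (i = 0; i < count; i++) {
		if (hashes[i] == cur_hash) {
			if (dup == cur_offset)
				return i;	/* resume exactly here */
			dup++;
		} else if (hashes[i] > cur_hash) {
			return i;		/* remembered entry is gone */
		}
	}
	return count;		/* nothing left to list in this leaf */
}
#endif
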
2488
2489/*========================================================================
2490 * Manage the INCOMPLETE flag in a leaf entry
2491 *========================================================================*/
2492
2493/*
2494 * Clear the INCOMPLETE flag on an entry in a leaf block.
2495 */
2496int
2497xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2498{
2499        xfs_attr_leafblock_t *leaf;
2500        xfs_attr_leaf_entry_t *entry;
2501        xfs_attr_leaf_name_remote_t *name_rmt;
2502        struct xfs_buf *bp;
2503        int error;
2504#ifdef DEBUG
2505        xfs_attr_leaf_name_local_t *name_loc;
2506        int namelen;
2507        char *name;
2508#endif /* DEBUG */
2509
2510        trace_xfs_attr_leaf_clearflag(args);
2511        /*
2512         * Set up the operation.
2513         */
2514        error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
2515                                             XFS_ATTR_FORK);
2516        if (error) {
2517                return(error);
2518        }
2519        ASSERT(bp != NULL);
2520
2521        leaf = bp->b_addr;
2522        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2523        ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2524        ASSERT(args->index >= 0);
2525        entry = &leaf->entries[ args->index ];
2526        ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
2527
2528#ifdef DEBUG
2529        if (entry->flags & XFS_ATTR_LOCAL) {
2530                name_loc = xfs_attr_leaf_name_local(leaf, args->index);
2531                namelen = name_loc->namelen;
2532                name = (char *)name_loc->nameval;
2533        } else {
2534                name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2535                namelen = name_rmt->namelen;
2536                name = (char *)name_rmt->name;
2537        }
2538        ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
2539        ASSERT(namelen == args->namelen);
2540        ASSERT(memcmp(name, args->name, namelen) == 0);
2541#endif /* DEBUG */
2542
2543        entry->flags &= ~XFS_ATTR_INCOMPLETE;
2544        xfs_trans_log_buf(args->trans, bp,
2545                         XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2546
2547        if (args->rmtblkno) {
2548                ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
2549                name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2550                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2551                name_rmt->valuelen = cpu_to_be32(args->valuelen);
2552                xfs_trans_log_buf(args->trans, bp,
2553                         XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2554        }
2555
2556        /*
2557         * Commit the flag value change and start the next trans in series.
2558         */
2559        return xfs_trans_roll(&args->trans, args->dp);
2560}
2561
2562/*
2563 * Set the INCOMPLETE flag on an entry in a leaf block.
2564 */
2565int
2566xfs_attr_leaf_setflag(xfs_da_args_t *args)
2567{
2568        xfs_attr_leafblock_t *leaf;
2569        xfs_attr_leaf_entry_t *entry;
2570        xfs_attr_leaf_name_remote_t *name_rmt;
2571        struct xfs_buf *bp;
2572        int error;
2573
2574        trace_xfs_attr_leaf_setflag(args);
2575
2576        /*
2577         * Set up the operation.
2578         */
2579        error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
2580                                             XFS_ATTR_FORK);
2581        if (error) {
2582                return(error);
2583        }
2584        ASSERT(bp != NULL);
2585
2586        leaf = bp->b_addr;
2587        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2588        ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2589        ASSERT(args->index >= 0);
2590        entry = &leaf->entries[ args->index ];
2591
2592        ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
2593        entry->flags |= XFS_ATTR_INCOMPLETE;
2594        xfs_trans_log_buf(args->trans, bp,
2595                        XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2596        if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
2597                name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2598                name_rmt->valueblk = 0;
2599                name_rmt->valuelen = 0;
2600                xfs_trans_log_buf(args->trans, bp,
2601                         XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2602        }
2603
2604        /*
2605         * Commit the flag value change and start the next trans in series.
2606         */
2607        return xfs_trans_roll(&args->trans, args->dp);
2608}
2609
2610/*
2611 * In a single transaction, clear the INCOMPLETE flag on the leaf entry
2612 * given by args->blkno/index and set the INCOMPLETE flag on the leaf
2613 * entry given by args->blkno2/index2.
2614 *
2615 * Note that they could be in different blocks, or in the same block.
2616 */
2617int
2618xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2619{
2620        xfs_attr_leafblock_t *leaf1, *leaf2;
2621        xfs_attr_leaf_entry_t *entry1, *entry2;
2622        xfs_attr_leaf_name_remote_t *name_rmt;
2623        struct xfs_buf *bp1, *bp2;
2624        int error;
2625#ifdef DEBUG
2626        xfs_attr_leaf_name_local_t *name_loc;
2627        int namelen1, namelen2;
2628        char *name1, *name2;
2629#endif /* DEBUG */
2630
2631        trace_xfs_attr_leaf_flipflags(args);
2632
2633        /*
2634         * Read the block containing the "old" attr
2635         */
2636        error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1,
2637                                             XFS_ATTR_FORK);
2638        if (error) {
2639                return(error);
2640        }
2641        ASSERT(bp1 != NULL);
2642
2643        /*
2644         * Read the block containing the "new" attr, if it is different
2645         */
2646        if (args->blkno2 != args->blkno) {
2647                error = xfs_da_read_buf(args->trans, args->dp, args->blkno2,
2648                                        -1, &bp2, XFS_ATTR_FORK);
2649                if (error) {
2650                        return(error);
2651                }
2652                ASSERT(bp2 != NULL);
2653        } else {
2654                bp2 = bp1;
2655        }
2656
2657        leaf1 = bp1->b_addr;
2658        ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2659        ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
2660        ASSERT(args->index >= 0);
2661        entry1 = &leaf1->entries[ args->index ];
2662
2663        leaf2 = bp2->b_addr;
2664        ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2665        ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
2666        ASSERT(args->index2 >= 0);
2667        entry2 = &leaf2->entries[ args->index2 ];
2668
2669#ifdef DEBUG
2670        if (entry1->flags & XFS_ATTR_LOCAL) {
2671                name_loc = xfs_attr_leaf_name_local(leaf1, args->index);
2672                namelen1 = name_loc->namelen;
2673                name1 = (char *)name_loc->nameval;
2674        } else {
2675                name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
2676                namelen1 = name_rmt->namelen;
2677                name1 = (char *)name_rmt->name;
2678        }
2679        if (entry2->flags & XFS_ATTR_LOCAL) {
2680                name_loc = xfs_attr_leaf_name_local(leaf2, args->index2);
2681                namelen2 = name_loc->namelen;
2682                name2 = (char *)name_loc->nameval;
2683        } else {
2684                name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
2685                namelen2 = name_rmt->namelen;
2686                name2 = (char *)name_rmt->name;
2687        }
2688        ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
2689        ASSERT(namelen1 == namelen2);
2690        ASSERT(memcmp(name1, name2, namelen1) == 0);
2691#endif /* DEBUG */
2692
2693        ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
2694        ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
2695
2696        entry1->flags &= ~XFS_ATTR_INCOMPLETE;
2697        xfs_trans_log_buf(args->trans, bp1,
2698                          XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
2699        if (args->rmtblkno) {
2700                ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2701                name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
2702                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2703                name_rmt->valuelen = cpu_to_be32(args->valuelen);
2704                xfs_trans_log_buf(args->trans, bp1,
2705                         XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2706        }
2707
2708        entry2->flags |= XFS_ATTR_INCOMPLETE;
2709        xfs_trans_log_buf(args->trans, bp2,
2710                          XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
2711        if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
2712                name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
2713                name_rmt->valueblk = 0;
2714                name_rmt->valuelen = 0;
2715                xfs_trans_log_buf(args->trans, bp2,
2716                         XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
2717        }
2718
2719        /*
2720         * Commit the flag value change and start the next trans in series.
2721         */
2722        error = xfs_trans_roll(&args->trans, args->dp);
2723
2724        return(error);
2725}
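
/*
 * xfs_attr_leaf_flipflags() is used by the attribute replace ("rename")
 * paths in xfs_attr.c: one leaf entry has its INCOMPLETE bit cleared while
 * its counterpart has it set, and both changes are logged in the same
 * transaction, the idea being that a crash at any point leaves exactly one
 * complete copy of the attribute on disk.  A minimal userspace sketch of
 * that handoff on a simplified entry type (demo_entry and DEMO_INCOMPLETE
 * are illustrative, not the kernel's structures):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define DEMO_INCOMPLETE	0x01
 *
 *	struct demo_entry { uint32_t hashval; uint8_t flags; };
 *
 *	// e1 starts out INCOMPLETE and becomes visible; e2 goes the other
 *	// way.  The asserts mirror the ones in the kernel function.
 *	static void flip_flags(struct demo_entry *e1, struct demo_entry *e2)
 *	{
 *		assert(e1->flags & DEMO_INCOMPLETE);
 *		assert(!(e2->flags & DEMO_INCOMPLETE));
 *		e1->flags &= ~DEMO_INCOMPLETE;
 *		e2->flags |= DEMO_INCOMPLETE;
 *	}
 *
 *	int main(void)
 *	{
 *		struct demo_entry e1 = { 0x1234, DEMO_INCOMPLETE };
 *		struct demo_entry e2 = { 0x1234, 0 };
 *
 *		flip_flags(&e1, &e2);
 *		printf("e1 flags %#x, e2 flags %#x\n",
 *		       (unsigned)e1.flags, (unsigned)e2.flags);
 *		return 0;
 *	}
 */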
2726
2727/*========================================================================
2728 * Indiscriminately delete the entire attribute fork
2729 *========================================================================*/
2730
2731/*
2732 * Recurse (gasp!) through the attribute nodes until we find leaves.
2733 * We're doing a depth-first traversal in order to invalidate everything.
2734 */
2735int
2736xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2737{
2738        xfs_da_blkinfo_t *info;
2739        xfs_daddr_t blkno;
2740        struct xfs_buf *bp;
2741        int error;
2742
2743        /*
2744         * Read block 0 to see what we have to work with.
2745         * We only get here if we have extents; since we remove
2746         * the extents in reverse order, the extent containing
2747         * block 0 must still be there.
2748         */
2749        error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
2750        if (error)
2751                return(error);
2752        blkno = XFS_BUF_ADDR(bp);
2753
2754        /*
2755         * Invalidate the tree, even if the "tree" is only a single leaf block.
2756         * This is a depth-first traversal!
2757         */
2758        info = bp->b_addr;
2759        if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
2760                error = xfs_attr_node_inactive(trans, dp, bp, 1);
2761        } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
2762                error = xfs_attr_leaf_inactive(trans, dp, bp);
2763        } else {
2764                error = XFS_ERROR(EIO);
2765                xfs_trans_brelse(*trans, bp);
2766        }
2767        if (error)
2768                return(error);
2769
2770        /*
2771         * Invalidate the incore copy of the root block.
2772         */
2773        error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
2774        if (error)
2775                return(error);
2776        xfs_trans_binval(*trans, bp);   /* remove from cache */
2777        /*
2778         * Commit the invalidate and start the next transaction.
2779         */
2780        error = xfs_trans_roll(trans, dp);
2781
2782        return (error);
2783}
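
/*
 * Whether the attribute fork holds a multi-level dabtree or just a single
 * leaf is decided entirely by the magic number in the xfs_da_blkinfo header
 * at the start of block 0, stored in big-endian on-disk order.  A minimal
 * userspace sketch of that dispatch, with made-up magic values and type
 * names standing in for XFS_DA_NODE_MAGIC, XFS_ATTR_LEAF_MAGIC and
 * xfs_da_blkinfo_t:
 *
 *	#include <arpa/inet.h>	// ntohs()/htons(), like be16_to_cpu()/cpu_to_be16()
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define DEMO_NODE_MAGIC	0x1111	// placeholder value only
 *	#define DEMO_LEAF_MAGIC	0x2222	// placeholder value only
 *
 *	struct demo_blkinfo { uint16_t magic_be; };	// stand-in for the magic field
 *
 *	static int inactivate_block0(const struct demo_blkinfo *info)
 *	{
 *		switch (ntohs(info->magic_be)) {
 *		case DEMO_NODE_MAGIC:
 *			printf("interior node: recurse through the children\n");
 *			return 0;
 *		case DEMO_LEAF_MAGIC:
 *			printf("single leaf: free its remote values\n");
 *			return 0;
 *		default:
 *			return 5;	// EIO-like failure for an unrecognized block
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		struct demo_blkinfo info = { htons(DEMO_LEAF_MAGIC) };
 *
 *		return inactivate_block0(&info);
 *	}
 */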
2784
2785/*
2786 * Recurse (gasp!) through the attribute nodes until we find leaves.
2787 * We're doing a depth-first traversal in order to invalidate everything.
2788 */
2789STATIC int
2790xfs_attr_node_inactive(
2791        struct xfs_trans **trans,
2792        struct xfs_inode *dp,
2793        struct xfs_buf  *bp,
2794        int             level)
2795{
2796        xfs_da_blkinfo_t *info;
2797        xfs_da_intnode_t *node;
2798        xfs_dablk_t child_fsb;
2799        xfs_daddr_t parent_blkno, child_blkno;
2800        int error, count, i;
2801        struct xfs_buf *child_bp;
2802
2803        /*
2804         * Since this code is recursive (gasp!) we must protect ourselves.
2805         */
2806        if (level > XFS_DA_NODE_MAXDEPTH) {
2807                xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
2808                return(XFS_ERROR(EIO));
2809        }
2810
2811        node = bp->b_addr;
2812        ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
2813        parent_blkno = XFS_BUF_ADDR(bp);        /* save for re-read later */
2814        count = be16_to_cpu(node->hdr.count);
2815        if (!count) {
2816                xfs_trans_brelse(*trans, bp);
2817                return(0);
2818        }
2819        child_fsb = be32_to_cpu(node->btree[0].before);
2820        xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
2821
2822        /*
2823         * If this is the node level just above the leaves, simply loop
2824         * over the leaves removing all of them.  If this is higher up
2825         * in the tree, recurse downward.
2826         */
2827        for (i = 0; i < count; i++) {
2828                /*
2829                 * Read the subsidiary block to see what we have to work with.
2830                 * Don't do this in a transaction.  This is a depth-first
2831                 * traversal of the tree so we may deal with many blocks
2832                 * before we come back to this one.
2833                 */
2834                error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp,
2835                                                XFS_ATTR_FORK);
2836                if (error)
2837                        return(error);
2838                if (child_bp) {
2839                                                /* save for re-read later */
2840                        child_blkno = XFS_BUF_ADDR(child_bp);
2841
2842                        /*
2843                         * Invalidate the subtree, however we have to.
2844                         */
2845                        info = child_bp->b_addr;
2846                        if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
2847                                error = xfs_attr_node_inactive(trans, dp,
2848                                                child_bp, level+1);
2849                        } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
2850                                error = xfs_attr_leaf_inactive(trans, dp,
2851                                                child_bp);
2852                        } else {
2853                                error = XFS_ERROR(EIO);
2854                                xfs_trans_brelse(*trans, child_bp);
2855                        }
2856                        if (error)
2857                                return(error);
2858
2859                        /*
2860                         * Remove the subsidiary block from the cache
2861                         * and from the log.
2862                         */
2863                        error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
2864                                &child_bp, XFS_ATTR_FORK);
2865                        if (error)
2866                                return(error);
2867                        xfs_trans_binval(*trans, child_bp);
2868                }
2869
2870                /*
2871                 * If we're not done, re-read the parent to get the next
2872                 * child block number.
2873                 */
2874                if ((i+1) < count) {
2875                        error = xfs_da_read_buf(*trans, dp, 0, parent_blkno,
2876                                &bp, XFS_ATTR_FORK);
2877                        if (error)
2878                                return(error);
2879                        child_fsb = be32_to_cpu(node->btree[i+1].before);
2880                        xfs_trans_brelse(*trans, bp);
2881                }
2882                /*
2883                  * Atomically commit the invalidation done so far for this child.
2884                 */
2885                error = xfs_trans_roll(trans, dp);
2886                if (error)
2887                        return (error);
2888        }
2889
2890        return(0);
2891}
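
/*
 * The recursion above is bounded by XFS_DA_NODE_MAXDEPTH: anything deeper
 * is treated as corruption and fails with EIO instead of risking kernel
 * stack overflow.  (One subtlety, noted with some hedging: inside the loop
 * the next child block number is still fetched through the original "node"
 * mapping after the parent buffer has been re-read; later kernels appear
 * to re-derive the btree array from the freshly read buffer.)  A minimal
 * userspace sketch of a depth-guarded depth-first walk, with invented
 * types (demo_node, DEMO_MAXDEPTH) standing in for the dabtree:
 *
 *	#include <errno.h>
 *	#include <stddef.h>
 *	#include <stdio.h>
 *
 *	#define DEMO_MAXDEPTH	5	// stand-in for XFS_DA_NODE_MAXDEPTH
 *
 *	struct demo_node {
 *		int nchildren;
 *		struct demo_node *children[4];	// NULL entries act as leaves
 *	};
 *
 *	// Depth-first invalidation: visit every child before declaring the
 *	// parent done, and refuse to recurse past DEMO_MAXDEPTH.
 *	static int invalidate(struct demo_node *node, int level)
 *	{
 *		int i, error;
 *
 *		if (level > DEMO_MAXDEPTH)
 *			return EIO;	// too deep: treat the tree as corrupt
 *		for (i = 0; i < node->nchildren; i++) {
 *			if (node->children[i] == NULL) {
 *				printf("leaf below level %d invalidated\n", level);
 *				continue;
 *			}
 *			error = invalidate(node->children[i], level + 1);
 *			if (error)
 *				return error;
 *		}
 *		printf("node at level %d invalidated\n", level);
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		struct demo_node lower = { 2, { NULL, NULL } };
 *		struct demo_node root = { 1, { &lower } };
 *
 *		return invalidate(&root, 1);
 *	}
 */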
2892
2893/*
2894 * Invalidate all of the "remote" value regions pointed to by a particular
2895 * leaf block.
2896 * Note that we must release the lock on the buffer so that we are not
2897 * caught holding something that the logging code wants to flush to disk.
2898 */
2899STATIC int
2900xfs_attr_leaf_inactive(
2901        struct xfs_trans **trans,
2902        struct xfs_inode *dp,
2903        struct xfs_buf  *bp)
2904{
2905        xfs_attr_leafblock_t *leaf;
2906        xfs_attr_leaf_entry_t *entry;
2907        xfs_attr_leaf_name_remote_t *name_rmt;
2908        xfs_attr_inactive_list_t *list, *lp;
2909        int error, count, size, tmp, i;
2910
2911        leaf = bp->b_addr;
2912        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2913
2914        /*
2915         * Count the number of "remote" value extents.
2916         */
2917        count = 0;
2918        entry = &leaf->entries[0];
2919        for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
2920                if (be16_to_cpu(entry->nameidx) &&
2921                    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
2922                        name_rmt = xfs_attr_leaf_name_remote(leaf, i);
2923                        if (name_rmt->valueblk)
2924                                count++;
2925                }
2926        }
2927
2928        /*
2929         * If there are no "remote" values, we're done.
2930         */
2931        if (count == 0) {
2932                xfs_trans_brelse(*trans, bp);
2933                return(0);
2934        }
2935
2936        /*
2937         * Allocate storage for a list of all the "remote" value extents.
2938         */
2939        size = count * sizeof(xfs_attr_inactive_list_t);
2940        list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP);
2941
2942        /*
2943         * Identify each of the "remote" value extents.
2944         */
2945        lp = list;
2946        entry = &leaf->entries[0];
2947        for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
2948                if (be16_to_cpu(entry->nameidx) &&
2949                    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
2950                        name_rmt = xfs_attr_leaf_name_remote(leaf, i);
2951                        if (name_rmt->valueblk) {
2952                                lp->valueblk = be32_to_cpu(name_rmt->valueblk);
2953                                lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
2954                                                    be32_to_cpu(name_rmt->valuelen));
2955                                lp++;
2956                        }
2957                }
2958        }
2959        xfs_trans_brelse(*trans, bp);   /* unlock for trans. in freextent() */
2960
2961        /*
2962         * Invalidate each of the "remote" value extents.
2963         */
2964        error = 0;
2965        for (lp = list, i = 0; i < count; i++, lp++) {
2966                tmp = xfs_attr_leaf_freextent(trans, dp,
2967                                lp->valueblk, lp->valuelen);
2968
2969                if (error == 0)
2970                        error = tmp;    /* save only the 1st errno */
2971        }
2972
2973        kmem_free((xfs_caddr_t)list);
2974        return(error);
2975}
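
/*
 * xfs_attr_leaf_inactive() makes two passes over the leaf: one to count
 * the remote values so the list can be sized, and one to record them.  The
 * final loop then keeps trying every extent even after a failure, reporting
 * only the first errno it saw.  A minimal userspace sketch of that error
 * policy, with invented helpers (free_one(), free_all()):
 *
 *	#include <stdio.h>
 *
 *	// Pretend block 3 fails; everything else succeeds.
 *	static int free_one(int blk)
 *	{
 *		return (blk == 3) ? 5 : 0;	// 5 == EIO on Linux
 *	}
 *
 *	// Attempt every item even after a failure, but hand back the first
 *	// errno, the same policy as the loop over the inactive list above.
 *	static int free_all(const int *blks, int count)
 *	{
 *		int i, tmp, error = 0;
 *
 *		for (i = 0; i < count; i++) {
 *			tmp = free_one(blks[i]);
 *			if (error == 0)
 *				error = tmp;	// save only the 1st errno
 *		}
 *		return error;
 *	}
 *
 *	int main(void)
 *	{
 *		int blks[] = { 1, 2, 3, 4 };
 *
 *		printf("first error: %d\n", free_all(blks, 4));
 *		return 0;
 *	}
 */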
2976
2977/*
2978 * Look at all the extents for this logical region,
2979 * invalidate any buffers that are incore/in transactions.
2980 */
2981STATIC int
2982xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
2983                                    xfs_dablk_t blkno, int blkcnt)
2984{
2985        xfs_bmbt_irec_t map;
2986        xfs_dablk_t tblkno;
2987        int tblkcnt, dblkcnt, nmap, error;
2988        xfs_daddr_t dblkno;
2989        xfs_buf_t *bp;
2990
2991        /*
2992         * Roll through the "value", invalidating the attribute value's
2993         * blocks.
2994         */
2995        tblkno = blkno;
2996        tblkcnt = blkcnt;
2997        while (tblkcnt > 0) {
2998                /*
2999                 * Try to remember where we decided to put the value.
3000                 */
3001                nmap = 1;
3002                error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
3003                                       &map, &nmap, XFS_BMAPI_ATTRFORK);
3004                if (error) {
3005                        return(error);
3006                }
3007                ASSERT(nmap == 1);
3008                ASSERT(map.br_startblock != DELAYSTARTBLOCK);
3009
3010                /*
3011                 * If it's a hole, these are already unmapped
3012                 * so there's nothing to invalidate.
3013                 */
3014                if (map.br_startblock != HOLESTARTBLOCK) {
3015
3016                        dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
3017                                                  map.br_startblock);
3018                        dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
3019                                                map.br_blockcount);
3020                        bp = xfs_trans_get_buf(*trans,
3021                                        dp->i_mount->m_ddev_targp,
3022                                        dblkno, dblkcnt, 0);
3023                        if (!bp)
3024                                return ENOMEM;
3025                        xfs_trans_binval(*trans, bp);
3026                        /*
3027                         * Roll to next transaction.
3028                         */
3029                        error = xfs_trans_roll(trans, dp);
3030                        if (error)
3031                                return (error);
3032                }
3033
3034                tblkno += map.br_blockcount;
3035                tblkcnt -= map.br_blockcount;
3036        }
3037
3038        return(0);
3039}
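
/*
 * xfs_attr_leaf_freextent() walks the remote value's logical range one
 * mapping at a time: look up the extent at the current offset, invalidate
 * its buffers unless the mapping is a hole, then advance by however many
 * blocks the mapping covered.  A minimal userspace sketch of that
 * advance-by-mapping loop, with a toy lookup (demo_lookup()) in place of
 * xfs_bmapi_read():
 *
 *	#include <stdio.h>
 *
 *	#define DEMO_HOLE	(-1L)	// analogous to HOLESTARTBLOCK
 *
 *	struct demo_map { long startblock; int blockcount; };
 *
 *	// Toy mapping table: logical blocks 2 and 3 are holes, everything
 *	// else maps to a 2-block extent.  Real code consults the bmap btree.
 *	static struct demo_map demo_lookup(int off)
 *	{
 *		struct demo_map m;
 *
 *		if (off == 2 || off == 3) {
 *			m.startblock = DEMO_HOLE;
 *			m.blockcount = 1;
 *		} else {
 *			m.startblock = 100 + off;
 *			m.blockcount = 2;
 *		}
 *		return m;
 *	}
 *
 *	int main(void)
 *	{
 *		int off = 0, left = 6;
 *
 *		while (left > 0) {
 *			struct demo_map m = demo_lookup(off);
 *
 *			if (m.startblock != DEMO_HOLE)
 *				printf("invalidate block %ld for %d blocks\n",
 *				       m.startblock, m.blockcount);
 *			off += m.blockcount;
 *			left -= m.blockcount;
 *		}
 *		return 0;
 *	}
 */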
3040