linux/fs/xfs/xfs_itable.c
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_log.h"
  22#include "xfs_inum.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_mount.h"
  27#include "xfs_bmap_btree.h"
  28#include "xfs_alloc_btree.h"
  29#include "xfs_ialloc_btree.h"
  30#include "xfs_dinode.h"
  31#include "xfs_inode.h"
  32#include "xfs_ialloc.h"
  33#include "xfs_itable.h"
  34#include "xfs_error.h"
  35#include "xfs_btree.h"
  36#include "xfs_trace.h"
  37#include "xfs_icache.h"
  38
  39STATIC int
  40xfs_internal_inum(
  41        xfs_mount_t     *mp,
  42        xfs_ino_t       ino)
  43{
  44        return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
  45                (xfs_sb_version_hasquota(&mp->m_sb) &&
  46                 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
  47}
  48
  49/*
  50 * Return stat information for one inode.
  51 * Return 0 if ok, else errno.
  52 */
  53int
  54xfs_bulkstat_one_int(
  55        struct xfs_mount        *mp,            /* mount point for filesystem */
  56        xfs_ino_t               ino,            /* inode to get data for */
  57        void __user             *buffer,        /* buffer to place output in */
  58        int                     ubsize,         /* size of buffer */
  59        bulkstat_one_fmt_pf     formatter,      /* formatter, copy to user */
  60        int                     *ubused,        /* bytes used by me */
  61        int                     *stat)          /* BULKSTAT_RV_... */
  62{
  63        struct xfs_icdinode     *dic;           /* dinode core info pointer */
  64        struct xfs_inode        *ip;            /* incore inode pointer */
  65        struct xfs_bstat        *buf;           /* return buffer */
  66        int                     error = 0;      /* error value */
  67
  68        *stat = BULKSTAT_RV_NOTHING;
  69
  70        if (!buffer || xfs_internal_inum(mp, ino))
  71                return XFS_ERROR(EINVAL);
  72
  73        buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
  74        if (!buf)
  75                return XFS_ERROR(ENOMEM);
  76
  77        error = xfs_iget(mp, NULL, ino,
  78                         (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
  79                         XFS_ILOCK_SHARED, &ip);
  80        if (error) {
  81                *stat = BULKSTAT_RV_NOTHING;
  82                goto out_free;
  83        }
  84
  85        ASSERT(ip != NULL);
  86        ASSERT(ip->i_imap.im_blkno != 0);
  87
  88        dic = &ip->i_d;
  89
  90        /* xfs_iget returns the following without needing
  91         * further change.
  92         */
  93        buf->bs_nlink = dic->di_nlink;
  94        buf->bs_projid_lo = dic->di_projid_lo;
  95        buf->bs_projid_hi = dic->di_projid_hi;
  96        buf->bs_ino = ino;
  97        buf->bs_mode = dic->di_mode;
  98        buf->bs_uid = dic->di_uid;
  99        buf->bs_gid = dic->di_gid;
 100        buf->bs_size = dic->di_size;
 101        buf->bs_atime.tv_sec = dic->di_atime.t_sec;
 102        buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
 103        buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
 104        buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
 105        buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
 106        buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
 107        buf->bs_xflags = xfs_ip2xflags(ip);
 108        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
 109        buf->bs_extents = dic->di_nextents;
 110        buf->bs_gen = dic->di_gen;
 111        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
 112        buf->bs_dmevmask = dic->di_dmevmask;
 113        buf->bs_dmstate = dic->di_dmstate;
 114        buf->bs_aextents = dic->di_anextents;
 115        buf->bs_forkoff = XFS_IFORK_BOFF(ip);
 116
 117        switch (dic->di_format) {
 118        case XFS_DINODE_FMT_DEV:
 119                buf->bs_rdev = ip->i_df.if_u2.if_rdev;
 120                buf->bs_blksize = BLKDEV_IOSIZE;
 121                buf->bs_blocks = 0;
 122                break;
 123        case XFS_DINODE_FMT_LOCAL:
 124        case XFS_DINODE_FMT_UUID:
 125                buf->bs_rdev = 0;
 126                buf->bs_blksize = mp->m_sb.sb_blocksize;
 127                buf->bs_blocks = 0;
 128                break;
 129        case XFS_DINODE_FMT_EXTENTS:
 130        case XFS_DINODE_FMT_BTREE:
 131                buf->bs_rdev = 0;
 132                buf->bs_blksize = mp->m_sb.sb_blocksize;
 133                buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
 134                break;
 135        }
 136        xfs_iunlock(ip, XFS_ILOCK_SHARED);
 137        IRELE(ip);
 138
 139        error = formatter(buffer, ubsize, ubused, buf);
 140
 141        if (!error)
 142                *stat = BULKSTAT_RV_DIDONE;
 143
 144 out_free:
 145        kmem_free(buf);
 146        return error;
 147}
 148
 149/* Return 0 on success or positive error */
 150STATIC int
 151xfs_bulkstat_one_fmt(
 152        void                    __user *ubuffer,
 153        int                     ubsize,
 154        int                     *ubused,
 155        const xfs_bstat_t       *buffer)
 156{
 157        if (ubsize < sizeof(*buffer))
 158                return XFS_ERROR(ENOMEM);
 159        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
 160                return XFS_ERROR(EFAULT);
 161        if (ubused)
 162                *ubused = sizeof(*buffer);
 163        return 0;
 164}
 165
 166int
 167xfs_bulkstat_one(
 168        xfs_mount_t     *mp,            /* mount point for filesystem */
 169        xfs_ino_t       ino,            /* inode number to get data for */
 170        void            __user *buffer, /* buffer to place output in */
 171        int             ubsize,         /* size of buffer */
 172        int             *ubused,        /* bytes used by me */
 173        int             *stat)          /* BULKSTAT_RV_... */
 174{
 175        return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
 176                                    xfs_bulkstat_one_fmt, ubused, stat);
 177}
 178
/* True while at least one full stat record still fits in the user buffer. */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Walks the inode allocation btree of each allocation group, batching
 * chunk records into an internal buffer, then calls @formatter for every
 * allocated inode until the filesystem is exhausted or the user buffer
 * (*ubcountp records of @statstruct_size bytes) fills up.
 *
 * *lastinop is the last inode returned by the previous call and is
 * updated on exit for iteration; *done is set to 1 once the whole
 * filesystem has been scanned.  Returns 0 or a positive error code; if
 * any records were emitted, the error is suppressed for this call (the
 * caller will see it on the next call instead).
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are no more stats to get */
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror;/* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */
	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 * A cookie past the last AG, or one that doesn't round-trip through
	 * the agno/agino split, means the scan is complete (or bogus).
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		/*
		 * NOTE(review): *ubcountp is dereferenced here before the
		 * NULL check on ubcountp below, so that check can never
		 * actually protect anything — confirm whether callers can
		 * legitimately pass NULL.
		 */
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/* Inodes per cluster buffer: whole block, or cluster-size worth. */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	/* Grab as much irec space as we can get, between 1 and 4 pages. */
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
					/* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
					/* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
					/* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
					/* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.  Each retry re-seeks at the next
			 * chunk boundary, until we fall off the AG.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (xfs_inobt_maskn(chunkidx, nicluster)
							& ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster,
							&xfs_inode_buf_ops);
				}
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.   There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					/*
					 * ENOENT/EINVAL mean the inode
					 * vanished or is internal: skip it.
					 * Anything else aborts the walk.
					 */
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}

		/*
		 * NOTE(review): bp is set to NULL at the top of each AG
		 * iteration and never reassigned anywhere in this loop, so
		 * this release appears to be dead code left over from an
		 * earlier cluster-buffer read path — confirm before removing.
		 */
		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free_large(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
 541
 542/*
 543 * Return stat information in bulk (by-inode) for the filesystem.
 544 * Special case for non-sequential one inode bulkstat.
 545 */
 546int                                     /* error status */
 547xfs_bulkstat_single(
 548        xfs_mount_t             *mp,    /* mount point for filesystem */
 549        xfs_ino_t               *lastinop, /* inode to return */
 550        char                    __user *buffer, /* buffer with inode stats */
 551        int                     *done)  /* 1 if there are more stats to get */
 552{
 553        int                     count;  /* count value for bulkstat call */
 554        int                     error;  /* return value */
 555        xfs_ino_t               ino;    /* filesystem inode number */
 556        int                     res;    /* result from bs1 */
 557
 558        /*
 559         * note that requesting valid inode numbers which are not allocated
 560         * to inodes will most likely cause xfs_imap_to_bp to generate warning
 561         * messages about bad magic numbers. This is ok. The fact that
 562         * the inode isn't actually an inode is handled by the
 563         * error check below. Done this way to make the usual case faster
 564         * at the expense of the error case.
 565         */
 566
 567        ino = (xfs_ino_t)*lastinop;
 568        error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
 569        if (error) {
 570                /*
 571                 * Special case way failed, do it the "long" way
 572                 * to see if that works.
 573                 */
 574                (*lastinop)--;
 575                count = 1;
 576                if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
 577                                sizeof(xfs_bstat_t), buffer, done))
 578                        return error;
 579                if (count == 0 || (xfs_ino_t)*lastinop != ino)
 580                        return error == EFSCORRUPTED ?
 581                                XFS_ERROR(EINVAL) : error;
 582                else
 583                        return 0;
 584        }
 585        *done = 0;
 586        return 0;
 587}
 588
 589int
 590xfs_inumbers_fmt(
 591        void                    __user *ubuffer, /* buffer to write to */
 592        const xfs_inogrp_t      *buffer,        /* buffer to read from */
 593        long                    count,          /* # of elements to read */
 594        long                    *written)       /* # of bytes written */
 595{
 596        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
 597                return -EFAULT;
 598        *written = count * sizeof(*buffer);
 599        return 0;
 600}
 601
/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btrees and emits one xfs_inogrp_t per inode
 * chunk through @formatter (batched in a kernel bounce buffer of up to a
 * page), until *count records have been produced or the filesystem is
 * exhausted.  *lastino is the last inode covered by the previous call
 * and is updated for iteration; *count is replaced by the number of
 * records actually emitted.  Returns 0 or a positive error code.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;		/* AGI buffer of the current ag */
	xfs_agino_t	agino;		/* last ag-relative inode covered */
	xfs_agnumber_t	agno;		/* current allocation group */
	int		bcount;		/* records the bounce buffer holds */
	xfs_inogrp_t	*buffer;	/* kernel bounce buffer */
	int		bufidx;		/* next free bounce-buffer slot */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	int		error;
	xfs_inobt_rec_incore_t r;	/* current inode chunk record */
	int		i;		/* get_rec "record found" flag */
	xfs_ino_t	ino;
	int		left;		/* records still wanted */
	int		tmp;		/* btree call result */

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* Bounce at most a page's worth of records per copy-out. */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		/* agbp == NULL means we need to (re)open the current AG. */
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			/* Off the end of this AG: move to the next one. */
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		/* Record the chunk and remember the last inode it covers. */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		/* Bounce buffer full: flush it to userspace. */
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		/* Flush any partial batch left in the bounce buffer. */
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		/*
		 * NOTE(review): the cursor is advanced even when the flush
		 * just above faulted (error set inside this block), so a
		 * retry after EFAULT skips the unflushed records — confirm
		 * whether that is intended.
		 */
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}
 730
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.