linux/fs/xfs/xfs_qm.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int      xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH     32

STATIC int
xfs_qm_dquot_walk(
        struct xfs_mount        *mp,
        int                     type,
        int                     (*execute)(struct xfs_dquot *dqp, void *data),
        void                    *data)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = XFS_DQUOT_TREE(qi, type);
        uint32_t                next_index;
        int                     last_error = 0;
        int                     skipped;
        int                     nr_found;

restart:
        skipped = 0;
        next_index = 0;
        nr_found = 0;

        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                mutex_lock(&qi->qi_tree_lock);
                nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                                        next_index, XFS_DQ_LOOKUP_BATCH);
                if (!nr_found) {
                        mutex_unlock(&qi->qi_tree_lock);
                        break;
                }

                for (i = 0; i < nr_found; i++) {
                        struct xfs_dquot *dqp = batch[i];

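                        /*
                         * Advance the search index so the next gang lookup
                         * starts after this dquot's id.
                         */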
                        next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

                        error = execute(batch[i], data);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                mutex_unlock(&qi->qi_tree_lock);

                /* bail out if the filesystem is corrupted.  */
                if (last_error == EFSCORRUPTED) {
                        skipped = 0;
                        break;
                }
        }

        if (skipped) {
                delay(1);
                goto restart;
        }

        return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct xfs_dquot        *gdqp = NULL;

        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        /*
         * If this quota has a group hint attached, prepare for releasing it
         * now.
         */
        gdqp = dqp->q_gdquot;
        if (gdqp) {
                xfs_dqlock(gdqp);
                dqp->q_gdquot = NULL;
        }

        dqp->dq_flags |= XFS_DQ_FREEING;

        xfs_dqflock(dqp);

        /*
         * If we are turning this type of quotas off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                __func__, dqp);
                } else {
                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                }
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));
        qi->qi_dquots--;

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        mutex_lock(&qi->qi_lru_lock);
        ASSERT(!list_empty(&dqp->q_lru));
        list_del_init(&dqp->q_lru);
        qi->qi_lru_count--;
        XFS_STATS_DEC(xs_qm_dquot_unused);
        mutex_unlock(&qi->qi_lru_lock);

        xfs_qm_dqdestroy(dqp);

        if (gdqp)
                xfs_qm_dqput(gdqp);
        return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
{
        if (flags & XFS_QMOPT_UQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount        *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
        }
}


/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        xfs_mount_t     *mp)
{
        int             error = 0;
        uint            sbf;

        /*
         * If quotas on realtime volumes is not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
                mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on disk superblock doesn't know that !
                         */
                        ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
{
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        IRELE(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        IRELE(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
        }
}

STATIC int
xfs_qm_dqattach_one(
        xfs_inode_t     *ip,
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
{
        xfs_dquot_t     *dqp;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * udqhint is the i_udquot field in inode, and is non-NULL only
         * when the type arg is group/project. Its purpose is to save a
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
        if (udqhint) {
                ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);

                /*
                 * No need to take dqlock to look at the id.
                 *
                 * The ID can't change until it gets reclaimed, and it won't
                 * be reclaimed as long as we have a ref from inode and we
                 * hold the ilock.
                 */
                dqp = udqhint->q_gdquot;
                if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
                        ASSERT(*IO_idqpp == NULL);

                        *IO_idqpp = xfs_qm_dqhold(dqp);
                        xfs_dqunlock(udqhint);
                        return 0;
                }

                /*
                 * We can't hold a dquot lock when we call the dqget code.
                 * We'll deadlock in no time, because of (not conforming to)
                 * lock ordering - the inodelock comes before any dquot lock,
                 * and we may drop and reacquire the ilock in xfs_qm_dqget().
                 */
                xfs_dqunlock(udqhint);
        }

        /*
         * Find the dquot from somewhere. This bumps the
         * reference count of dquot and returns it locked.
         * This can return ENOENT if dquot didn't exist on
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
        error = xfs_qm_dqget(ip->i_mount, ip, id, type,
                             doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}


/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
        xfs_dquot_t     *udq,
        xfs_dquot_t     *gdq)
{
        xfs_dquot_t     *tmp;

        xfs_dqlock(udq);

        tmp = udq->q_gdquot;
        if (tmp) {
                if (tmp == gdq)
                        goto done;

                udq->q_gdquot = NULL;
                xfs_qm_dqrele(tmp);
        }

        udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
        xfs_dqunlock(udq);
}

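/*
 * Check whether an inode needs its dquots attached: quotas must be running
 * and enabled, the dquots must not already be attached, and the quota
 * inodes themselves never get dquots attached.
 */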
static bool
xfs_qm_need_dqattach(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return false;
        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (ip->i_ino == mp->m_sb.sb_uquotino ||
            ip->i_ino == mp->m_sb.sb_gquotino)
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        uint            nquotas = 0;
        int             error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot) :
                        xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
                 * attached above. It'll get detached, if not already.
                 */
                if (error)
                        goto done;
                nquotas++;
        }

        /*
         * Attach this group quota to the user quota as a hint.
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas == 2) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);

                /*
                 * We do not have i_udquot locked at this point, but this check
                 * is OK since we don't depend on the i_gdquot to be accurate
                 * 100% all the time. It is just a hint, and this will
                 * succeed in general.
                 */
                if (ip->i_udquot->q_gdquot != ip->i_gdquot)
                        xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
        }

 done:
#ifdef DEBUG
        if (!error) {
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_OQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
        }
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
        return error;
}

int
xfs_qm_dqattach(
        struct xfs_inode        *ip,
        uint                    flags)
{
        int                     error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, flags);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        xfs_inode_t     *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qinf;
        int             error;
        xfs_dquot_t     *dqp;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
                kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        INIT_LIST_HEAD(&qinf->qi_lru_list);
        qinf->qi_lru_count = 0;
        mutex_init(&qinf->qi_lru_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(qinf->qi_dqchunklen);
        qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
        do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * We look at the USR dquot with id == 0 first, but if user quotas
         * are not enabled we go to the GRP dquot with id == 0.
         * We don't really care to keep separate default limits for user
         * and group quotas, at least not at this point.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;

                /*
                 * The warnings and timers set the grace period given to
                 * a user or group before writes are disallowed. If it is
                 * zero, a default is used.
                 */
                qinf->qi_btimelimit = ddqp->d_btimer ?
                        be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = ddqp->d_itimer ?
                        be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
                        be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = ddqp->d_bwarns ?
                        be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = ddqp->d_iwarns ?
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
                qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
                qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
                qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }

        qinf->qi_shrinker.shrink = xfs_qm_shake;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        unregister_shrinker(&qi->qi_shrinker);

        if (qi->qi_uquotaip) {
                IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
        xfs_mount_t     *mp,
        xfs_inode_t     **ip,
        __int64_t       sbfields,
        uint            flags)
{
        xfs_trans_t     *tp;
        int             error;
        int             committed;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
        if ((error = xfs_trans_reserve(tp,
                                      XFS_QM_QINOCREATE_SPACE_RES(mp),
                                      XFS_CREATE_LOG_RES(mp), 0,
                                      XFS_TRANS_PERM_LOG_RES,
                                      XFS_CREATE_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
                return error;
        }

        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
                       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                        XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;

                /* qflags will get updated _after_ quotacheck */
                mp->m_sb.sb_qflags = 0;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
        else
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);

        if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
                return error;
        }
        return 0;
}


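/*
 * Zero the counters, timers and warning counts of every dquot in the given
 * buffer so that quotacheck can rebuild them from scratch.
 */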
STATIC void
xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
        xfs_dqid_t      id,
        uint            type)
{
        xfs_disk_dquot_t        *ddq;
        int                     j;

        trace_xfs_reset_dqcounts(bp, _RET_IP_);

        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
         */
#ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
        ddq = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
                 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
                 */
                (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                                      "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
                ddq->d_btimer = 0;
                ddq->d_itimer = 0;
                ddq->d_rtbtimer = 0;
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;
                ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
        }
}

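/*
 * Read each block of a range of the quota inode, reset the dquots held in
 * it and queue the buffer for delayed write.
 */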
STATIC int
xfs_qm_dqiter_bufs(
        struct xfs_mount        *mp,
        xfs_dqid_t              firstid,
        xfs_fsblock_t           bno,
        xfs_filblks_t           blkcnt,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_buf          *bp;
        int                     error;
        int                     type;

        ASSERT(blkcnt > 0);
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;

        /*
         * Blkcnt arg can be a very big number, and might even be
         * larger than the log itself. So, we have to break it up into
         * manageable-sized transactions.
         * Note that we don't start a permanent transaction here; we might
         * not be able to get a log reservation for the whole thing up front,
         * and we don't really care to either, because we just discard
         * everything if we were to crash in the middle of this loop.
         */
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp);
                if (error)
                        break;

                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
                /*
                 * Go to the next block.
                 */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }

        return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        struct xfs_inode        *qip,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_bmbt_irec    *map;
        int                     i, nmaps;       /* number of map entries */
        int                     error;          /* return value */
        xfs_fileoff_t           lblkno;
        xfs_filblks_t           maxlblkcnt;
        xfs_dqid_t              firstid;
        xfs_fsblock_t           rablkno;
        xfs_filblks_t           rablkcnt;

        error = 0;
        /*
         * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single threaded.
         */
        if (qip->i_d.di_nblocks == 0)
                return 0;

        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        do {
                nmaps = XFS_DQITER_MAP_SIZE;
                /*
                 * We aren't changing the inode itself. Just changing
                 * some of its data. No new blocks are added here, and
                 * the inode is never added to the transaction.
                 */
                xfs_ilock(qip, XFS_ILOCK_SHARED);
                error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
                                       map, &nmaps, 0);
                xfs_iunlock(qip, XFS_ILOCK_SHARED);
                if (error)
                        break;

                ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
                for (i = 0; i < nmaps; i++) {
                        ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
                        ASSERT(map[i].br_blockcount);


                        lblkno += map[i].br_blockcount;

                        if (map[i].br_startblock == HOLESTARTBLOCK)
                                continue;

                        firstid = (xfs_dqid_t) map[i].br_startoff *
                                mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
                        if ((i+1 < nmaps) &&
                            (map[i+1].br_startblock != HOLESTARTBLOCK)) {
                                rablkcnt =  map[i+1].br_blockcount;
                                rablkno = map[i+1].br_startblock;
                                while (rablkcnt--) {
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
                                               mp->m_quotainfo->qi_dqchunklen);
                                        rablkno++;
                                }
                        }
                        /*
                         * Iterate thru all the blks in the extent and
                         * reset the counters of all the dquots inside them.
                         */
                        error = xfs_qm_dqiter_bufs(mp, firstid,
                                                   map[i].br_startblock,
                                                   map[i].br_blockcount,
                                                   flags, buffer_list);
                        if (error)
                                goto out;
                }
        } while (nmaps > 0);

out:
        kmem_free(map);
        return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
        struct xfs_inode        *ip,
        xfs_dqid_t              id,
        uint                    type,
        xfs_qcnt_t              nblks,
        xfs_qcnt_t              rtblks)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget(mp, ip, id, type,
                             XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
        if (error) {
                /*
                 * Shouldn't be able to turn off quotas here.
                 */
                ASSERT(error != ESRCH);
                ASSERT(error != ENOENT);
                return error;
        }

        trace_xfs_dqadjust(dqp);

        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
        be64_add_cpu(&dqp->q_core.d_icount, 1);
        dqp->q_res_icount++;
        if (nblks) {
                be64_add_cpu(&dqp->q_core.d_bcount, nblks);
                dqp->q_res_bcount += nblks;
        }
        if (rtblks) {
                be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
                dqp->q_res_rtbcount += rtblks;
        }

        /*
         * Set default limits, adjust timers (since we changed usages)
         *
         * There are no timers for the default values set in the root dquot.
         */
        if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
                xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
        }

        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_qm_dqput(dqp);
        return 0;
}

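/*
 * Count the realtime blocks allocated to an inode by walking the extent
 * records in its data fork.
 */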
STATIC int
xfs_qm_get_rtblks(
        xfs_inode_t     *ip,
        xfs_qcnt_t      *O_rtblks)
{
        xfs_filblks_t   rtblks;                 /* total rt blks */
        xfs_extnum_t    idx;                    /* extent record index */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_extnum_t    nextents;               /* number of extent entries */
        int             error;

        ASSERT(XFS_IS_REALTIME_INODE(ip));
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
                        return error;
        }
        rtblks = 0;
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (idx = 0; idx < nextents; idx++)
                rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
        *O_rtblks = (xfs_qcnt_t)rtblks;
        return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* not used */
        int             ubsize,         /* not used */
        int             *ubused,        /* not used */
        int             *res)           /* result code value */
{
        xfs_inode_t     *ip;
        xfs_qcnt_t      nblks, rtblks = 0;
        int             error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * rootino must have its resources accounted for, not so with the quota
         * inodes.
         */
        if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
                *res = BULKSTAT_RV_NOTHING;
                return XFS_ERROR(EINVAL);
        }

        /*
         * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
         * interface expects the inode to be exclusively locked because that's
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
        }

        ASSERT(ip->i_delayed_blks == 0);

        if (XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Walk thru the extent list and count the realtime blocks.
                 */
                error = xfs_qm_get_rtblks(ip, &rtblks);
                if (error)
                        goto error0;
        }

        nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

        /*
         * Add the (disk blocks and inode) resources occupied by this
         * inode to its dquots. We do this adjustment in the incore dquot,
         * and also copy the changes to its buffer.
         * We don't care about putting these changes in a transaction
         * envelope because if we crash in the middle of a 'quotacheck'
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
         * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
                                                   XFS_DQ_USER, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
                                                   XFS_DQ_GROUP, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
                                                   XFS_DQ_PROJ, nblks, rtblks);
                if (error)
                        goto error0;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_DIDONE;
        return 0;

error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_GIVEUP;
        return error;
}

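/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write. Dquots that are being freed or are already clean are left
 * alone.
 */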
STATIC int
xfs_qm_flush_one(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct list_head        *buffer_list = data;
        struct xfs_buf          *bp = NULL;
        int                     error = 0;

        xfs_dqlock(dqp);
        if (dqp->dq_flags & XFS_DQ_FREEING)
                goto out_unlock;
        if (!XFS_DQ_IS_DIRTY(dqp))
                goto out_unlock;

        xfs_dqflock(dqp);
        error = xfs_qm_dqflush(dqp, &bp);
        if (error)
                goto out_unlock;

        xfs_buf_delwri_queue(bp, buffer_list);
        xfs_buf_relse(bp);
out_unlock:
        xfs_dqunlock(dqp);
        return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
        xfs_mount_t     *mp)
{
        int             done, count, error, error2;
        xfs_ino_t       lastino;
        size_t          structsz;
        xfs_inode_t     *uip, *gip;
        uint            flags;
        LIST_HEAD       (buffer_list);

        count = INT_MAX;
        structsz = 1;
        lastino = 0;
        flags = 0;

        ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        xfs_notice(mp, "Quotacheck needed: Please wait.");

        /*
         * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
        uip = mp->m_quotainfo->qi_uquotaip;
        if (uip) {
                error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }

        gip = mp->m_quotainfo->qi_gquotaip;
        if (gip) {
                error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
                                         XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_OQUOTA_CHKD;
        }

        do {
                /*
                 * Iterate thru all the inodes in the file system,
                 * adjusting the corresponding dquot counters in core.
                 */
                error = xfs_bulkstat(mp, &lastino, &count,
                                     xfs_qm_dqusage_adjust,
                                     structsz, NULL, &done);
                if (error)
                        break;

        } while (!done);

        /*
         * We've made all the changes that we need to make incore.  Flush them
         * down to disk buffers if everything was updated successfully.
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
                                          &buffer_list);
        }
        if (XFS_IS_GQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }
        if (XFS_IS_PQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }

        error2 = xfs_buf_delwri_submit(&buffer_list);
        if (!error)
                error = error2;

        /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
         * and turn quotaoff. The dquots won't be attached to any of the inodes
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }

        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
        mp->m_qflags |= flags;

 error_return:
        while (!list_empty(&buffer_list)) {
                struct xfs_buf *bp =
                        list_first_entry(&buffer_list, struct xfs_buf, b_list);
                list_del_init(&bp->b_list);
                xfs_buf_relse(bp);
        }

        if (error) {
                xfs_warn(mp,
        "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
                        error);
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo != NULL);
                xfs_qm_destroy_quotainfo(mp);
                if (xfs_mount_reset_sbqflags(mp)) {
                        xfs_warn(mp,
                                "Quotacheck: Failed to reset quota flags.");
                }
        } else
                xfs_notice(mp, "Quotacheck: Done.");
        return (error);
}


/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
        xfs_mount_t     *mp)
{
        xfs_inode_t     *uip, *gip;
        int             error;
        __int64_t       sbflags;
        uint            flags;

        ASSERT(mp->m_quotainfo);
        uip = gip = NULL;
        sbflags = 0;
        flags = 0;

        /*
         * Get the uquota and gquota inodes
         */
        if (xfs_sb_version_hasquota(&mp->m_sb)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                             0, 0, &uip)))
                                return XFS_ERROR(error);
                }
                if (XFS_IS_OQUOTA_ON(mp) &&
                    mp->m_sb.sb_gquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_gquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                             0, 0, &gip))) {
                                if (uip)
                                        IRELE(uip);
                                return XFS_ERROR(error);
                        }
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
                sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                            XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
        }

        /*
         * Create the two inodes, if they don't exist already. The changes
         * made above will get added to a transaction and logged in one of
         * the qino_alloc calls below.  If the device is readonly,
         * temporarily switch to read-write to do this.
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                if ((error = xfs_qm_qino_alloc(mp, &uip,
                                              sbflags | XFS_SB_UQUOTINO,
                                              flags | XFS_QMOPT_UQUOTA)))
                        return XFS_ERROR(error);

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
                flags |= (XFS_IS_GQUOTA_ON(mp) ?
                                XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
                error = xfs_qm_qino_alloc(mp, &gip,
                                          sbflags | XFS_SB_GQUOTINO, flags);
                if (error) {
                        if (uip)
                                IRELE(uip);

                        return XFS_ERROR(error);
                }
        }

        mp->m_quotainfo->qi_uquotaip = uip;
        mp->m_quotainfo->qi_gquotaip = gip;

        return 0;
}

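/*
 * Remove a reclaimed dquot from the radix tree and destroy it; the caller
 * has already taken it off the LRU.
 */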
STATIC void
xfs_qm_dqfree_one(
        struct xfs_dquot        *dqp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        mutex_lock(&qi->qi_tree_lock);
        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));

        qi->qi_dquots--;
        mutex_unlock(&qi->qi_tree_lock);

        xfs_qm_dqdestroy(dqp);
}

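/*
 * Attempt to reclaim a single dquot from the LRU: skip it if it is locked
 * or has regained a reference, write it back first if it is dirty, and
 * otherwise mark it XFS_DQ_FREEING and move it to the dispose list.
 */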
STATIC void
xfs_qm_dqreclaim_one(
        struct xfs_dquot        *dqp,
        struct list_head        *buffer_list,
        struct list_head        *dispose_list)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        int                     error;

        if (!xfs_dqlock_nowait(dqp))
                goto out_move_tail;

        /*
         * This dquot has acquired a reference in the meantime; remove it
         * from the freelist and try again.
         */
1462        if (dqp->q_nrefs) {
1463                xfs_dqunlock(dqp);
1464
1465                trace_xfs_dqreclaim_want(dqp);
1466                XFS_STATS_INC(xs_qm_dqwants);
1467
1468                list_del_init(&dqp->q_lru);
1469                qi->qi_lru_count--;
1470                XFS_STATS_DEC(xs_qm_dquot_unused);
1471                return;
1472        }
1473
1474        /*
1475         * Try to grab the flush lock. If this dquot is in the process of
1476         * getting flushed to disk, we don't want to reclaim it.
1477         */
1478        if (!xfs_dqflock_nowait(dqp))
1479                goto out_unlock_move_tail;
1480
1481        if (XFS_DQ_IS_DIRTY(dqp)) {
1482                struct xfs_buf  *bp = NULL;
1483
1484                trace_xfs_dqreclaim_dirty(dqp);
1485
1486                error = xfs_qm_dqflush(dqp, &bp);
1487                if (error) {
1488                        xfs_warn(mp, "%s: dquot %p flush failed",
1489                                 __func__, dqp);
1490                        goto out_unlock_move_tail;
1491                }
1492
1493                xfs_buf_delwri_queue(bp, buffer_list);
1494                xfs_buf_relse(bp);
1495                /*
1496                 * Give the dquot another try on the freelist, as the
1497                 * flushing will take some time.
1498                 */
1499                goto out_unlock_move_tail;
1500        }
1501        xfs_dqfunlock(dqp);
1502
1503        /*
1504         * Prevent lookups now that we are past the point of no return.
1505         */
1506        dqp->dq_flags |= XFS_DQ_FREEING;
1507        xfs_dqunlock(dqp);
1508
1509        ASSERT(dqp->q_nrefs == 0);
1510        list_move_tail(&dqp->q_lru, dispose_list);
1511        qi->qi_lru_count--;
1512        XFS_STATS_DEC(xs_qm_dquot_unused);
1513
1514        trace_xfs_dqreclaim_done(dqp);
1515        XFS_STATS_INC(xs_qm_dqreclaims);
1516        return;
1517
1518        /*
1519         * Move the dquot to the tail of the list so that we don't spin on it.
1520         */
1521out_unlock_move_tail:
1522        xfs_dqunlock(dqp);
1523out_move_tail:
1524        list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
1525        trace_xfs_dqreclaim_busy(dqp);
1526        XFS_STATS_INC(xs_qm_dqreclaim_misses);
1527}
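
/*
 * Editorial sketch (not in the original source): the function above is an
 * instance of a common trylock-and-defer LRU scan.  With hypothetical
 * names, the shape of the pattern is:
 *
 *	if (!trylock(obj))			-- contended: defer
 *		move obj to tail of LRU;
 *	else if (obj->refcount)			-- re-referenced: not reclaimable
 *		drop obj from LRU, unlock;
 *	else if (obj is dirty)			-- start writeback, defer
 *		flush obj, move to tail of LRU;
 *	else					-- reclaimable
 *		mark obj freeing, move to dispose list;
 *
 * Deferring busy objects to the tail keeps the scan from spinning on the
 * same contended entry while still making forward progress.
 */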
1528
1529STATIC int
1530xfs_qm_shake(
1531        struct shrinker         *shrink,
1532        struct shrink_control   *sc)
1533{
1534        struct xfs_quotainfo    *qi =
1535                container_of(shrink, struct xfs_quotainfo, qi_shrinker);
1536        int                     nr_to_scan = sc->nr_to_scan;
1537        LIST_HEAD               (buffer_list);
1538        LIST_HEAD               (dispose_list);
1539        struct xfs_dquot        *dqp;
1540        int                     error;
1541
1542        if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1543                return 0;
1544        if (!nr_to_scan)
1545                goto out;
1546
1547        mutex_lock(&qi->qi_lru_lock);
1548        while (!list_empty(&qi->qi_lru_list)) {
1549                if (nr_to_scan-- <= 0)
1550                        break;
1551                dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
1552                                       q_lru);
1553                xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
1554        }
1555        mutex_unlock(&qi->qi_lru_lock);
1556
1557        error = xfs_buf_delwri_submit(&buffer_list);
1558        if (error)
1559                xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
1560
1561        while (!list_empty(&dispose_list)) {
1562                dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
1563                list_del_init(&dqp->q_lru);
1564                xfs_qm_dqfree_one(dqp);
1565        }
1566
1567out:
1568        return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
1569}
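
/*
 * Editorial note (not in the original source): a shrinker like the one
 * above is wired into VM reclaim when the quotainfo is set up.  With the
 * shrinker API of this kernel generation, the registration (done in
 * xfs_qm_init_quotainfo()) looks roughly like:
 *
 *	qinf->qi_shrinker.shrink = xfs_qm_shake;
 *	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 *	register_shrinker(&qinf->qi_shrinker);
 *
 * The value returned by xfs_qm_shake() is the shrinker's estimate of
 * reclaimable objects, scaled by sysctl_vfs_cache_pressure (100 means
 * reclaim at the default rate).
 */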
1570
1571/*
1572 * Start a transaction and write the incore superblock changes to
1573 * disk. The flags parameter indicates which superblock fields have changed.
1574 */
1575int
1576xfs_qm_write_sb_changes(
1577        xfs_mount_t     *mp,
1578        __int64_t       flags)
1579{
1580        xfs_trans_t     *tp;
1581        int             error;
1582
1583        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1584        if ((error = xfs_trans_reserve(tp, 0,
1585                                      mp->m_sb.sb_sectsize + 128, 0,
1586                                      0,
1587                                      XFS_DEFAULT_LOG_COUNT))) {
1588                xfs_trans_cancel(tp, 0);
1589                return error;
1590        }
1591
1592        xfs_mod_sb(tp, flags);
1593        error = xfs_trans_commit(tp, 0);
1594
1595        return error;
1596}
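
/*
 * Editorial note (not in the original source): callers pass the XFS_SB_*
 * bits naming the in-core superblock fields they changed.  For example,
 * the quotaoff path updates the on-disk quota flags with something like:
 *
 *	error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
 */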
1597
1598
1599/* --------------- utility functions for vnodeops ---------------- */
1600
1601
1602/*
1603 * Given an inode, a uid, gid and prid, make sure that we have
1604 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1605 * quotas by creating this file.
1606 * This also attaches dquot(s) to the given inode after locking it,
1607 * and returns the dquots corresponding to the uid and/or gid.
1608 *
1609 * in   : inode (unlocked)
1610 * out  : udquot, gdquot with references taken and unlocked
1611 */
1612int
1613xfs_qm_vop_dqalloc(
1614        struct xfs_inode        *ip,
1615        uid_t                   uid,
1616        gid_t                   gid,
1617        prid_t                  prid,
1618        uint                    flags,
1619        struct xfs_dquot        **O_udqpp,
1620        struct xfs_dquot        **O_gdqpp)
1621{
1622        struct xfs_mount        *mp = ip->i_mount;
1623        struct xfs_dquot        *uq, *gq;
1624        int                     error;
1625        uint                    lockflags;
1626
1627        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1628                return 0;
1629
1630        lockflags = XFS_ILOCK_EXCL;
1631        xfs_ilock(ip, lockflags);
1632
1633        if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1634                gid = ip->i_d.di_gid;
1635
1636        /*
1637         * Attach the dquot(s) to this inode, doing a dquot allocation
1638         * if necessary. The dquot(s) will not be locked.
1639         */
1640        if (XFS_NOT_DQATTACHED(mp, ip)) {
1641                error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1642                if (error) {
1643                        xfs_iunlock(ip, lockflags);
1644                        return error;
1645                }
1646        }
1647
1648        uq = gq = NULL;
1649        if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1650                if (ip->i_d.di_uid != uid) {
1651                        /*
1652                         * What we need is the dquot that has this uid; if
1653                         * we passed the inode to dqget, the inode's uid
1654                         * would take priority over the uid argument. We
1655                         * must unlock the inode here before calling dqget
1656                         * when not passing the inode, because otherwise we
1657                         * would deadlock by doing trans_reserve while
1658                         * holding the ilock.
1659                         */
1660                        xfs_iunlock(ip, lockflags);
1661                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
1662                                                 XFS_DQ_USER,
1663                                                 XFS_QMOPT_DQALLOC |
1664                                                 XFS_QMOPT_DOWARN,
1665                                                 &uq))) {
1666                                ASSERT(error != ENOENT);
1667                                return error;
1668                        }
1669                        /*
1670                         * Get the ilock in the right order.
1671                         */
1672                        xfs_dqunlock(uq);
1673                        lockflags = XFS_ILOCK_SHARED;
1674                        xfs_ilock(ip, lockflags);
1675                } else {
1676                        /*
1677                         * Take an extra reference, because we'll return
1678                         * this to the caller.
1679                         */
1680                        ASSERT(ip->i_udquot);
1681                        uq = xfs_qm_dqhold(ip->i_udquot);
1682                }
1683        }
1684        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1685                if (ip->i_d.di_gid != gid) {
1686                        xfs_iunlock(ip, lockflags);
1687                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
1688                                                 XFS_DQ_GROUP,
1689                                                 XFS_QMOPT_DQALLOC |
1690                                                 XFS_QMOPT_DOWARN,
1691                                                 &gq))) {
1692                                if (uq)
1693                                        xfs_qm_dqrele(uq);
1694                                ASSERT(error != ENOENT);
1695                                return error;
1696                        }
1697                        xfs_dqunlock(gq);
1698                        lockflags = XFS_ILOCK_SHARED;
1699                        xfs_ilock(ip, lockflags);
1700                } else {
1701                        ASSERT(ip->i_gdquot);
1702                        gq = xfs_qm_dqhold(ip->i_gdquot);
1703                }
1704        } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1705                if (xfs_get_projid(ip) != prid) {
1706                        xfs_iunlock(ip, lockflags);
1707                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1708                                                 XFS_DQ_PROJ,
1709                                                 XFS_QMOPT_DQALLOC |
1710                                                 XFS_QMOPT_DOWARN,
1711                                                 &gq))) {
1712                                if (uq)
1713                                        xfs_qm_dqrele(uq);
1714                                ASSERT(error != ENOENT);
1715                                return error;
1716                        }
1717                        xfs_dqunlock(gq);
1718                        lockflags = XFS_ILOCK_SHARED;
1719                        xfs_ilock(ip, lockflags);
1720                } else {
1721                        ASSERT(ip->i_gdquot);
1722                        gq = xfs_qm_dqhold(ip->i_gdquot);
1723                }
1724        }
1725        if (uq)
1726                trace_xfs_dquot_dqalloc(ip);
1727
1728        xfs_iunlock(ip, lockflags);
1729        if (O_udqpp)
1730                *O_udqpp = uq;
1731        else if (uq)
1732                xfs_qm_dqrele(uq);
1733        if (O_gdqpp)
1734                *O_gdqpp = gq;
1735        else if (gq)
1736                xfs_qm_dqrele(gq);
1737        return 0;
1738}
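
/*
 * Editorial sketch (not in the original source): a typical caller is the
 * inode-creation path, which asks for all enabled quota types before
 * starting its transaction, roughly:
 *
 *	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(),
 *				   prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp);
 *
 * The returned dquots are unlocked with references held, exactly as the
 * comment above describes, and must eventually be dropped with
 * xfs_qm_dqrele().
 */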
1739
1740/*
1741 * Actually transfer ownership and apply the dquot modifications;
1742 * the necessary quota reservations were already taken.
1743 */
1744xfs_dquot_t *
1745xfs_qm_vop_chown(
1746        xfs_trans_t     *tp,
1747        xfs_inode_t     *ip,
1748        xfs_dquot_t     **IO_olddq,
1749        xfs_dquot_t     *newdq)
1750{
1751        xfs_dquot_t     *prevdq;
1752        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
1753                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1754
1755
1756        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1757        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1758
1759        /* old dquot */
1760        prevdq = *IO_olddq;
1761        ASSERT(prevdq);
1762        ASSERT(prevdq != newdq);
1763
1764        xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1765        xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1766
1767        /* the sparkling new dquot */
1768        xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1769        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1770
1771        /*
1772         * Take an extra reference, because the inode is going to keep
1773         * this dquot pointer even after the trans_commit.
1774         */
1775        *IO_olddq = xfs_qm_dqhold(newdq);
1776
1777        return prevdq;
1778}
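
/*
 * Editorial sketch (not in the original source): the caller (the setattr
 * path) keeps the returned old dquot and releases it only after the
 * transaction commits, roughly:
 *
 *	olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
 *	...
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	xfs_qm_dqrele(olddquot);
 *
 * Releasing before the commit would drop the reference the inode still
 * needs while the transaction is in flight.
 */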
1779
1780/*
1781 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1782 */
1783int
1784xfs_qm_vop_chown_reserve(
1785        xfs_trans_t     *tp,
1786        xfs_inode_t     *ip,
1787        xfs_dquot_t     *udqp,
1788        xfs_dquot_t     *gdqp,
1789        uint            flags)
1790{
1791        xfs_mount_t     *mp = ip->i_mount;
1792        uint            delblks, blkflags, prjflags = 0;
1793        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
1794        int             error;
1795
1796
1797        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1798        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1799
1800        delblks = ip->i_delayed_blks;
1801        delblksudq = delblksgdq = unresudq = unresgdq = NULL;
1802        blkflags = XFS_IS_REALTIME_INODE(ip) ?
1803                        XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1804
1805        if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1806            ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
1807                delblksudq = udqp;
1808                /*
1809                 * If there are delayed allocation blocks, then we have to
1810                 * unreserve those from the old dquot, and add them to the
1811                 * new dquot.
1812                 */
1813                if (delblks) {
1814                        ASSERT(ip->i_udquot);
1815                        unresudq = ip->i_udquot;
1816                }
1817        }
1818        if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
1819                if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
1820                     xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
1821                        prjflags = XFS_QMOPT_ENOSPC;
1822
1823                if (prjflags ||
1824                    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
1825                     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
1826                        delblksgdq = gdqp;
1827                        if (delblks) {
1828                                ASSERT(ip->i_gdquot);
1829                                unresgdq = ip->i_gdquot;
1830                        }
1831                }
1832        }
1833
1834        if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1835                                delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
1836                                flags | blkflags | prjflags)))
1837                return error;
1838
1839        /*
1840         * Do the delayed-blocks reservations/unreservations now. Since these
1841         * are done without the help of a transaction, if a reservation fails,
1842         * its previous reservations won't be automatically undone by the
1843         * transaction code, so we have to undo them manually here.
1844         */
1845        if (delblks) {
1846                /*
1847                 * Do the reservations first. Unreservation can't fail.
1848                 */
1849                ASSERT(delblksudq || delblksgdq);
1850                ASSERT(unresudq || unresgdq);
1851                if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1852                                delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
1853                                flags | blkflags | prjflags)))
1854                        return error;
1855                xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1856                                unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
1857                                blkflags);
1858        }
1859
1860        return 0;
1861}
1862
1863int
1864xfs_qm_vop_rename_dqattach(
1865        struct xfs_inode        **i_tab)
1866{
1867        struct xfs_mount        *mp = i_tab[0]->i_mount;
1868        int                     i;
1869
1870        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1871                return 0;
1872
1873        for (i = 0; (i < 4 && i_tab[i]); i++) {
1874                struct xfs_inode        *ip = i_tab[i];
1875                int                     error;
1876
1877                /*
1878                 * Watch out for duplicate entries in the table.
1879                 */
1880                if (i == 0 || ip != i_tab[i-1]) {
1881                        if (XFS_NOT_DQATTACHED(mp, ip)) {
1882                                error = xfs_qm_dqattach(ip, 0);
1883                                if (error)
1884                                        return error;
1885                        }
1886                }
1887        }
1888        return 0;
1889}
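
/*
 * Editorial sketch (not in the original source): rename builds the table
 * from its (up to four) inodes, NULL-padded within the array, along the
 * lines of:
 *
 *	struct xfs_inode *inodes[4] = { src_dp, target_dp, src_ip,
 *					target_ip };	-- target_ip may be NULL
 *	error = xfs_qm_vop_rename_dqattach(inodes);
 *
 * which is why the loop above stops at the first NULL entry or after four
 * inodes, and skips duplicates such as src_dp == target_dp.
 */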
1890
1891void
1892xfs_qm_vop_create_dqattach(
1893        struct xfs_trans        *tp,
1894        struct xfs_inode        *ip,
1895        struct xfs_dquot        *udqp,
1896        struct xfs_dquot        *gdqp)
1897{
1898        struct xfs_mount        *mp = tp->t_mountp;
1899
1900        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1901                return;
1902
1903        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1904        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1905
1906        if (udqp) {
1907                ASSERT(ip->i_udquot == NULL);
1908                ASSERT(XFS_IS_UQUOTA_ON(mp));
1909                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1910
1911                ip->i_udquot = xfs_qm_dqhold(udqp);
1912                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1913        }
1914        if (gdqp) {
1915                ASSERT(ip->i_gdquot == NULL);
1916                ASSERT(XFS_IS_OQUOTA_ON(mp));
1917                ASSERT((XFS_IS_GQUOTA_ON(mp) ?
1918                        ip->i_d.di_gid : xfs_get_projid(ip)) ==
1919                                be32_to_cpu(gdqp->q_core.d_id));
1920
1921                ip->i_gdquot = xfs_qm_dqhold(gdqp);
1922                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1923        }
1924}
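
/*
 * Editorial note (not in the original source): this is the create-side
 * counterpart to xfs_qm_vop_dqalloc() above.  Once the new inode exists
 * inside the transaction, the creation path attaches the dquots it
 * allocated earlier with something like:
 *
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
 *
 * charging the new inode to the quota via XFS_TRANS_DQ_ICOUNT as part of
 * the same transaction that creates it.
 */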
1925
1926