linux/fs/xfs/xfs_qm.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * The quota manager. There is one of these per mounted file system,
 * hung off the xfs_mount as m_quotainfo. It tracks the overall quota
 * functionality, including the per-type dquot radix trees and the
 * freelist of unused dquots.
 */
STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int      xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH     32

STATIC int
xfs_qm_dquot_walk(
        struct xfs_mount        *mp,
        int                     type,
        int                     (*execute)(struct xfs_dquot *dqp, void *data),
        void                    *data)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = XFS_DQUOT_TREE(qi, type);
        uint32_t                next_index;
        int                     last_error = 0;
        int                     skipped;
        int                     nr_found;

restart:
        skipped = 0;
        next_index = 0;
        nr_found = 0;

        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                mutex_lock(&qi->qi_tree_lock);
                nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                                        next_index, XFS_DQ_LOOKUP_BATCH);
                if (!nr_found) {
                        mutex_unlock(&qi->qi_tree_lock);
                        break;
                }

                for (i = 0; i < nr_found; i++) {
                        struct xfs_dquot *dqp = batch[i];

                        next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

                        error = execute(batch[i], data);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                mutex_unlock(&qi->qi_tree_lock);

                /* bail out if the filesystem is corrupted.  */
                if (last_error == EFSCORRUPTED) {
                        skipped = 0;
                        break;
                }
        }

        if (skipped) {
                delay(1);
                goto restart;
        }

        return last_error;
}
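
/*
 * Illustrative only (a sketch, not part of the driver code): any function
 * matching the execute() signature above can be driven by
 * xfs_qm_dquot_walk().  A hypothetical callback that merely counts dquots
 * could look like this; returning EAGAIN makes the walker retry the dquot
 * after a short delay, and a sticky EFSCORRUPTED return aborts the walk.
 *
 *	STATIC int
 *	xfs_qm_count_one(struct xfs_dquot *dqp, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int	count = 0;
 *	error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_count_one, &count);
 */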


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct xfs_dquot        *gdqp = NULL;

        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        /*
         * If this quota has a group hint attached, prepare for releasing it
         * now.
         */
        gdqp = dqp->q_gdquot;
        if (gdqp) {
                xfs_dqlock(gdqp);
                dqp->q_gdquot = NULL;
        }

        dqp->dq_flags |= XFS_DQ_FREEING;

        xfs_dqflock(dqp);

        /*
         * If we are turning this quota type off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                __func__, dqp);
                } else {
                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                }
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));
        qi->qi_dquots--;

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        mutex_lock(&qi->qi_lru_lock);
        ASSERT(!list_empty(&dqp->q_lru));
        list_del_init(&dqp->q_lru);
        qi->qi_lru_count--;
        XFS_STATS_DEC(xs_qm_dquot_unused);
        mutex_unlock(&qi->qi_lru_lock);

        xfs_qm_dqdestroy(dqp);

        if (gdqp)
                xfs_qm_dqput(gdqp);
        return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
{
        if (flags & XFS_QMOPT_UQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount        *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
        }
}


/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        xfs_mount_t     *mp)
{
        int             error = 0;
        uint            sbf;

        /*
         * If quotas on realtime volumes are not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one quota type is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
                mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on-disk superblock doesn't know that!
                         */
                        ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
{
        /*
         * Release the dquots that the root inode, et al, might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        IRELE(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        IRELE(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
        }
}

STATIC int
xfs_qm_dqattach_one(
        xfs_inode_t     *ip,
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
{
        xfs_dquot_t     *dqp;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * udqhint is the i_udquot field in inode, and is non-NULL only
         * when the type arg is group/project. Its purpose is to save a
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
        if (udqhint) {
                ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);

                /*
                 * No need to take dqlock to look at the id.
                 *
                 * The ID can't change until it gets reclaimed, and it won't
                 * be reclaimed as long as we have a ref from inode and we
                 * hold the ilock.
                 */
                dqp = udqhint->q_gdquot;
                if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
                        ASSERT(*IO_idqpp == NULL);

                        *IO_idqpp = xfs_qm_dqhold(dqp);
                        xfs_dqunlock(udqhint);
                        return 0;
                }

                /*
                 * We can't hold a dquot lock when we call the dqget code.
                 * We'll deadlock in no time, because of (not conforming to)
                 * lock ordering - the inode lock comes before any dquot lock,
                 * and we may drop and reacquire the ilock in xfs_qm_dqget().
                 */
                xfs_dqunlock(udqhint);
        }

        /*
         * Find the dquot from somewhere. This bumps the
         * reference count of dquot and returns it locked.
         * This can return ENOENT if dquot didn't exist on
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
        error = xfs_qm_dqget(ip->i_mount, ip, id, type,
                             doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}


/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
        xfs_dquot_t     *udq,
        xfs_dquot_t     *gdq)
{
        xfs_dquot_t     *tmp;

        xfs_dqlock(udq);

        tmp = udq->q_gdquot;
        if (tmp) {
                if (tmp == gdq)
                        goto done;

                udq->q_gdquot = NULL;
                xfs_qm_dqrele(tmp);
        }

        udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
        xfs_dqunlock(udq);
}

static bool
xfs_qm_need_dqattach(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return false;
        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (ip->i_ino == mp->m_sb.sb_uquotino ||
            ip->i_ino == mp->m_sb.sb_gquotino)
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        uint            nquotas = 0;
        int             error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot) :
                        xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
                 * attached above. It'll get detached, if not already.
                 */
                if (error)
                        goto done;
                nquotas++;
        }

        /*
         * Attach this group quota to the user quota as a hint.
         * This WON'T, in general, result in thrashing.
         */
        if (nquotas == 2) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);

                /*
                 * We do not have i_udquot locked at this point, but this check
                 * is OK since we don't depend on the i_gdquot to be accurate
                 * 100% all the time. It is just a hint, and this will
                 * succeed in general.
                 */
                if (ip->i_udquot->q_gdquot != ip->i_gdquot)
                        xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
        }

 done:
#ifdef DEBUG
        if (!error) {
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_OQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
        }
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
        return error;
}

int
xfs_qm_dqattach(
        struct xfs_inode        *ip,
        uint                    flags)
{
        int                     error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, flags);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        xfs_inode_t     *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qinf;
        int             error;
        xfs_dquot_t     *dqp;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

        /*
         * See if quotainodes are set up, and if not, allocate them,
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
                kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        INIT_LIST_HEAD(&qinf->qi_lru_list);
        qinf->qi_lru_count = 0;
        mutex_init(&qinf->qi_lru_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(qinf->qi_dqchunklen);
        qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
        do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
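        /*
         * Worked example (assuming 4096-byte filesystem blocks):
         * XFS_DQUOT_CLUSTER_SIZE_FSB is a single fsb, so qi_dqchunklen
         * comes out to 8 basic (512-byte) blocks, and qi_dqperchunk
         * becomes 4096 / sizeof(xfs_dqblk_t) dquots per chunk.
         */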

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * We look at the USR dquot with id == 0 first, but if user quotas
         * are not enabled we fall back to the GRP dquot with id == 0.
         * We don't really care to keep separate default limits for user
         * and group quotas, at least not at this point.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;

                /*
                 * The warnings and timers set the grace period given to
                 * a user or group before further writes are refused.
                 * If a value is zero, a default is used.
                 */
                qinf->qi_btimelimit = ddqp->d_btimer ?
                        be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = ddqp->d_itimer ?
                        be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
                        be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = ddqp->d_bwarns ?
                        be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = ddqp->d_iwarns ?
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
                qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
                qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
                qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }

        qinf->qi_shrinker.shrink = xfs_qm_shake;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        unregister_shrinker(&qi->qi_shrinker);

        if (qi->qi_uquotaip) {
                IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
        xfs_mount_t     *mp,
        xfs_inode_t     **ip,
        __int64_t       sbfields,
        uint            flags)
{
        xfs_trans_t     *tp;
        int             error;
        int             committed;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
        if ((error = xfs_trans_reserve(tp,
                                      XFS_QM_QINOCREATE_SPACE_RES(mp),
                                      XFS_CREATE_LOG_RES(mp), 0,
                                      XFS_TRANS_PERM_LOG_RES,
                                      XFS_CREATE_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
                return error;
        }

        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
                       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                        XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;

                /* qflags will get updated _after_ quotacheck */
                mp->m_sb.sb_qflags = 0;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
        else
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);

        if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
                return error;
        }
        return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
        xfs_dqid_t      id,
        uint            type)
{
        xfs_disk_dquot_t        *ddq;
        int                     j;

        trace_xfs_reset_dqcounts(bp, _RET_IP_);

        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
         */
#ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
        ddq = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
                 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
                 */
                (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                                      "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
                ddq->d_btimer = 0;
                ddq->d_itimer = 0;
                ddq->d_rtbtimer = 0;
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;
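                /*
                 * Advance by a whole on-disk xfs_dqblk_t, not just the
                 * xfs_disk_dquot_t core, so the pointer lands on the next
                 * dquot's header rather than inside the trailing fill.
                 */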
                ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
        }
}

STATIC int
xfs_qm_dqiter_bufs(
        struct xfs_mount        *mp,
        xfs_dqid_t              firstid,
        xfs_fsblock_t           bno,
        xfs_filblks_t           blkcnt,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_buf          *bp;
        int                     error;
        int                     type;

        ASSERT(blkcnt > 0);
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;

        /*
         * The blkcnt arg can be a very big number, and might even be
         * larger than the log itself. So, we have to break it up into
         * manageable-sized transactions.
         * Note that we don't start a permanent transaction here; we might
         * not be able to get a log reservation for the whole thing up front,
         * and we don't really care to either, because we just discard
         * everything if we were to crash in the middle of this loop.
         */
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                              &xfs_dquot_buf_ops);
                if (error)
                        break;

                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
                /*
                 * Go on to the next block.
                 */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }

        return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        struct xfs_inode        *qip,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_bmbt_irec    *map;
        int                     i, nmaps;       /* number of map entries */
        int                     error;          /* return value */
        xfs_fileoff_t           lblkno;
        xfs_filblks_t           maxlblkcnt;
        xfs_dqid_t              firstid;
        xfs_fsblock_t           rablkno;
        xfs_filblks_t           rablkcnt;

        error = 0;
        /*
         * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single threaded.
         */
        if (qip->i_d.di_nblocks == 0)
                return 0;

        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        do {
                nmaps = XFS_DQITER_MAP_SIZE;
                /*
                 * We aren't changing the inode itself. Just changing
                 * some of its data. No new blocks are added here, and
                 * the inode is never added to the transaction.
                 */
                xfs_ilock(qip, XFS_ILOCK_SHARED);
                error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
                                       map, &nmaps, 0);
                xfs_iunlock(qip, XFS_ILOCK_SHARED);
                if (error)
                        break;

                ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
                for (i = 0; i < nmaps; i++) {
                        ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
                        ASSERT(map[i].br_blockcount);

                        lblkno += map[i].br_blockcount;

                        if (map[i].br_startblock == HOLESTARTBLOCK)
                                continue;

                        firstid = (xfs_dqid_t) map[i].br_startoff *
                                mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
                        if ((i+1 < nmaps) &&
                            (map[i+1].br_startblock != HOLESTARTBLOCK)) {
                                rablkcnt = map[i+1].br_blockcount;
                                rablkno = map[i+1].br_startblock;
                                while (rablkcnt--) {
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
                                               mp->m_quotainfo->qi_dqchunklen,
                                               NULL);
                                        rablkno++;
                                }
                        }
                        /*
                         * Iterate thru all the blks in the extent and
                         * reset the counters of all the dquots inside them.
                         */
                        error = xfs_qm_dqiter_bufs(mp, firstid,
                                                   map[i].br_startblock,
                                                   map[i].br_blockcount,
                                                   flags, buffer_list);
                        if (error)
                                goto out;
                }
        } while (nmaps > 0);

out:
        kmem_free(map);
        return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
        struct xfs_inode        *ip,
        xfs_dqid_t              id,
        uint                    type,
        xfs_qcnt_t              nblks,
        xfs_qcnt_t              rtblks)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget(mp, ip, id, type,
                             XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
        if (error) {
                /*
                 * Shouldn't be able to turn off quotas here.
                 */
                ASSERT(error != ESRCH);
                ASSERT(error != ENOENT);
                return error;
        }

        trace_xfs_dqadjust(dqp);

        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
        be64_add_cpu(&dqp->q_core.d_icount, 1);
        dqp->q_res_icount++;
        if (nblks) {
                be64_add_cpu(&dqp->q_core.d_bcount, nblks);
                dqp->q_res_bcount += nblks;
        }
        if (rtblks) {
                be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
                dqp->q_res_rtbcount += rtblks;
        }

        /*
         * Set default limits, adjust timers (since we changed usages)
         *
         * There are no timers for the default values set in the root dquot.
         */
        if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
                xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
        }

        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_qm_dqput(dqp);
        return 0;
}

STATIC int
xfs_qm_get_rtblks(
        xfs_inode_t     *ip,
        xfs_qcnt_t      *O_rtblks)
{
        xfs_filblks_t   rtblks;                 /* total rt blks */
        xfs_extnum_t    idx;                    /* extent record index */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_extnum_t    nextents;               /* number of extent entries */
        int             error;

        ASSERT(XFS_IS_REALTIME_INODE(ip));
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
                        return error;
        }
        rtblks = 0;
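        /*
         * if_bytes holds the size of the incore extent list in bytes;
         * dividing by the extent record size yields the number of
         * records to walk.
         */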
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (idx = 0; idx < nextents; idx++)
                rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
        *O_rtblks = (xfs_qcnt_t)rtblks;
        return 0;
}

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* not used */
        int             ubsize,         /* not used */
        int             *ubused,        /* not used */
        int             *res)           /* result code value */
{
        xfs_inode_t     *ip;
        xfs_qcnt_t      nblks, rtblks = 0;
        int             error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * rootino must have its resources accounted for, not so with the quota
         * inodes.
         */
        if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
                *res = BULKSTAT_RV_NOTHING;
                return XFS_ERROR(EINVAL);
        }

        /*
         * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
         * interface expects the inode to be exclusively locked because that's
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
        }

        ASSERT(ip->i_delayed_blks == 0);

        if (XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Walk thru the extent list and count the realtime blocks.
                 */
                error = xfs_qm_get_rtblks(ip, &rtblks);
                if (error)
                        goto error0;
        }

        nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

        /*
         * Add the (disk blocks and inode) resources occupied by this
         * inode to its dquots. We do this adjustment in the incore dquot,
         * and also copy the changes to its buffer.
         * We don't care about putting these changes in a transaction
         * envelope because if we crash in the middle of a 'quotacheck'
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
         * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
                                                   XFS_DQ_USER, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
                                                   XFS_DQ_GROUP, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
                                                   XFS_DQ_PROJ, nblks, rtblks);
                if (error)
                        goto error0;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_DIDONE;
        return 0;

error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_GIVEUP;
        return error;
}

STATIC int
xfs_qm_flush_one(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct list_head        *buffer_list = data;
        struct xfs_buf          *bp = NULL;
        int                     error = 0;

        xfs_dqlock(dqp);
        if (dqp->dq_flags & XFS_DQ_FREEING)
                goto out_unlock;
        if (!XFS_DQ_IS_DIRTY(dqp))
                goto out_unlock;

        xfs_dqflock(dqp);
        error = xfs_qm_dqflush(dqp, &bp);
        if (error)
                goto out_unlock;

        xfs_buf_delwri_queue(bp, buffer_list);
        xfs_buf_relse(bp);
out_unlock:
        xfs_dqunlock(dqp);
        return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
        xfs_mount_t     *mp)
{
        int             done, count, error, error2;
        xfs_ino_t       lastino;
        size_t          structsz;
        xfs_inode_t     *uip, *gip;
        uint            flags;
        LIST_HEAD       (buffer_list);

        count = INT_MAX;
        structsz = 1;
        lastino = 0;
        flags = 0;

        ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        xfs_notice(mp, "Quotacheck needed: Please wait.");

        /*
         * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
        uip = mp->m_quotainfo->qi_uquotaip;
        if (uip) {
                error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }

        gip = mp->m_quotainfo->qi_gquotaip;
        if (gip) {
                error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
                                         XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_OQUOTA_CHKD;
        }

        do {
                /*
                 * Iterate thru all the inodes in the file system,
                 * adjusting the corresponding dquot counters in core.
                 */
                error = xfs_bulkstat(mp, &lastino, &count,
                                     xfs_qm_dqusage_adjust,
                                     structsz, NULL, &done);
                if (error)
                        break;

        } while (!done);

        /*
         * We've made all the changes that we need to make incore.  Flush them
         * down to disk buffers if everything was updated successfully.
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
                                          &buffer_list);
        }
        if (XFS_IS_GQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }
        if (XFS_IS_PQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }

        error2 = xfs_buf_delwri_submit(&buffer_list);
        if (!error)
                error = error2;

        /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
         * and turn quotaoff. The dquots won't be attached to any of the inodes
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }

        /*
         * If one quota type is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
        mp->m_qflags |= flags;

 error_return:
        while (!list_empty(&buffer_list)) {
                struct xfs_buf *bp =
                        list_first_entry(&buffer_list, struct xfs_buf, b_list);
                list_del_init(&bp->b_list);
                xfs_buf_relse(bp);
        }

        if (error) {
                xfs_warn(mp,
        "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
                        error);
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo != NULL);
                xfs_qm_destroy_quotainfo(mp);
                if (xfs_mount_reset_sbqflags(mp)) {
                        xfs_warn(mp,
                                "Quotacheck: Failed to reset quota flags.");
                }
        } else
                xfs_notice(mp, "Quotacheck: Done.");
        return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
        xfs_mount_t     *mp)
{
        xfs_inode_t     *uip, *gip;
        int             error;
        __int64_t       sbflags;
        uint            flags;

        ASSERT(mp->m_quotainfo);
        uip = gip = NULL;
        sbflags = 0;
        flags = 0;

        /*
         * Get the uquota and gquota inodes
         */
        if (xfs_sb_version_hasquota(&mp->m_sb)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                             0, 0, &uip)))
                                return XFS_ERROR(error);
                }
                if (XFS_IS_OQUOTA_ON(mp) &&
                    mp->m_sb.sb_gquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_gquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                             0, 0, &gip))) {
                                if (uip)
                                        IRELE(uip);
                                return XFS_ERROR(error);
                        }
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
                sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                            XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
        }

        /*
         * Create the two inodes, if they don't exist already. The changes
         * made above will get added to a transaction and logged in one of
         * the qino_alloc calls below.  If the device is readonly,
         * temporarily switch to read-write to do this.
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                if ((error = xfs_qm_qino_alloc(mp, &uip,
                                              sbflags | XFS_SB_UQUOTINO,
                                              flags | XFS_QMOPT_UQUOTA)))
                        return XFS_ERROR(error);

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
                flags |= (XFS_IS_GQUOTA_ON(mp) ?
                                XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
                error = xfs_qm_qino_alloc(mp, &gip,
                                          sbflags | XFS_SB_GQUOTINO, flags);
                if (error) {
                        if (uip)
                                IRELE(uip);

                        return XFS_ERROR(error);
                }
        }

        mp->m_quotainfo->qi_uquotaip = uip;
        mp->m_quotainfo->qi_gquotaip = gip;

        return 0;
}

STATIC void
xfs_qm_dqfree_one(
        struct xfs_dquot        *dqp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        mutex_lock(&qi->qi_tree_lock);
        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));

        qi->qi_dquots--;
        mutex_unlock(&qi->qi_tree_lock);

        xfs_qm_dqdestroy(dqp);
}
1447
1448STATIC void
1449xfs_qm_dqreclaim_one(
1450        struct xfs_dquot        *dqp,
1451        struct list_head        *buffer_list,
1452        struct list_head        *dispose_list)
1453{
1454        struct xfs_mount        *mp = dqp->q_mount;
1455        struct xfs_quotainfo    *qi = mp->m_quotainfo;
1456        int                     error;
1457
1458        if (!xfs_dqlock_nowait(dqp))
1459                goto out_move_tail;
1460
1461        /*
1462         * This dquot has acquired a reference in the meantime remove it from
1463         * the freelist and try again.
1464         */
1465        if (dqp->q_nrefs) {
1466                xfs_dqunlock(dqp);
1467
1468                trace_xfs_dqreclaim_want(dqp);
1469                XFS_STATS_INC(xs_qm_dqwants);
1470
1471                list_del_init(&dqp->q_lru);
1472                qi->qi_lru_count--;
1473                XFS_STATS_DEC(xs_qm_dquot_unused);
1474                return;
1475        }
1476
1477        /*
1478         * Try to grab the flush lock. If this dquot is in the process of
1479         * getting flushed to disk, we don't want to reclaim it.
1480         */
1481        if (!xfs_dqflock_nowait(dqp))
1482                goto out_unlock_move_tail;
1483
1484        if (XFS_DQ_IS_DIRTY(dqp)) {
1485                struct xfs_buf  *bp = NULL;
1486
1487                trace_xfs_dqreclaim_dirty(dqp);
1488
1489                error = xfs_qm_dqflush(dqp, &bp);
1490                if (error) {
1491                        xfs_warn(mp, "%s: dquot %p flush failed",
1492                                 __func__, dqp);
1493                        goto out_unlock_move_tail;
1494                }
1495
1496                xfs_buf_delwri_queue(bp, buffer_list);
1497                xfs_buf_relse(bp);
1498                /*
1499                 * Give the dquot another try on the freelist, as the
1500                 * flushing will take some time.
1501                 */
1502                goto out_unlock_move_tail;
1503        }
1504        xfs_dqfunlock(dqp);
1505
1506        /*
1507         * Prevent lookups now that we are past the point of no return.
1508         */
1509        dqp->dq_flags |= XFS_DQ_FREEING;
1510        xfs_dqunlock(dqp);
1511
1512        ASSERT(dqp->q_nrefs == 0);
1513        list_move_tail(&dqp->q_lru, dispose_list);
1514        qi->qi_lru_count--;
1515        XFS_STATS_DEC(xs_qm_dquot_unused);
1516
1517        trace_xfs_dqreclaim_done(dqp);
1518        XFS_STATS_INC(xs_qm_dqreclaims);
1519        return;
1520
1521        /*
1522         * Move the dquot to the tail of the list so that we don't spin on it.
1523         */
1524out_unlock_move_tail:
1525        xfs_dqunlock(dqp);
1526out_move_tail:
1527        list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
1528        trace_xfs_dqreclaim_busy(dqp);
1529        XFS_STATS_INC(xs_qm_dqreclaim_misses);
1530}
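
/*
 * Sketch of the lookup side of the XFS_DQ_FREEING protocol (an assumed
 * outline of xfs_qm_dqget() in xfs_dquot.c, not code from this file): a
 * concurrent lookup that finds a dquot we are freeing must back off and
 * retry instead of taking a reference:
 *
 *	mutex_lock(&qi->qi_tree_lock);
 *	dqp = radix_tree_lookup(tree, id);
 *	if (dqp) {
 *		xfs_dqlock(dqp);
 *		if (dqp->dq_flags & XFS_DQ_FREEING) {
 *			xfs_dqunlock(dqp);
 *			mutex_unlock(&qi->qi_tree_lock);
 *			delay(1);
 *			goto restart;
 *		}
 *	}
 */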
1531
1532STATIC int
1533xfs_qm_shake(
1534        struct shrinker         *shrink,
1535        struct shrink_control   *sc)
1536{
1537        struct xfs_quotainfo    *qi =
1538                container_of(shrink, struct xfs_quotainfo, qi_shrinker);
1539        int                     nr_to_scan = sc->nr_to_scan;
1540        LIST_HEAD               (buffer_list);
1541        LIST_HEAD               (dispose_list);
1542        struct xfs_dquot        *dqp;
1543        int                     error;
1544
1545        if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1546                return 0;
1547        if (!nr_to_scan)
1548                goto out;
1549
1550        mutex_lock(&qi->qi_lru_lock);
1551        while (!list_empty(&qi->qi_lru_list)) {
1552                if (nr_to_scan-- <= 0)
1553                        break;
1554                dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
1555                                       q_lru);
1556                xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
1557        }
1558        mutex_unlock(&qi->qi_lru_lock);
1559
1560        error = xfs_buf_delwri_submit(&buffer_list);
1561        if (error)
1562                xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
1563
1564        while (!list_empty(&dispose_list)) {
1565                dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
1566                list_del_init(&dqp->q_lru);
1567                xfs_qm_dqfree_one(dqp);
1568        }
1569
1570out:
1571        return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
1572}
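
/*
 * For reference, a sketch of how this shrinker callback is expected to
 * be wired up (the registration lives in xfs_qm_init_quotainfo(), not
 * shown here; this is an assumed outline, not verbatim code):
 *
 *	qinf->qi_shrinker.shrink = xfs_qm_shake;
 *	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 *	register_shrinker(&qinf->qi_shrinker);
 *
 * The VM then calls xfs_qm_shake() with sc->nr_to_scan == 0 to query the
 * pool size, and with a nonzero count when it wants memory freed.
 */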
1573
1574/*
1575 * Start a transaction and write the incore superblock changes to
1576 * disk. The flags parameter indicates which fields have changed.
1577 */
1578int
1579xfs_qm_write_sb_changes(
1580        xfs_mount_t     *mp,
1581        __int64_t       flags)
1582{
1583        xfs_trans_t     *tp;
1584        int             error;
1585
1586        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1587        if ((error = xfs_trans_reserve(tp, 0,
1588                                      mp->m_sb.sb_sectsize + 128, 0,
1589                                      0,
1590                                      XFS_DEFAULT_LOG_COUNT))) {
1591                xfs_trans_cancel(tp, 0);
1592                return error;
1593        }
1594
1595        xfs_mod_sb(tp, flags);
1596        error = xfs_trans_commit(tp, 0);
1597
1598        return error;
1599}
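
/*
 * Typical usage (illustrative; callers elsewhere in the quota code pass
 * whichever superblock fields they have modified):
 *
 *	error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
 *
 * which logs and commits just the quota-flags field of the superblock.
 */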
1600
1601
1602/* --------------- utility functions for vnodeops ---------------- */
1603
1604
1605/*
1606 * Given an inode, a uid, gid and prid, make sure that we have
1607 * allocated the relevant dquot(s) on disk, and that we won't exceed
1608 * inode quotas by creating this file. This also attaches dquot(s)
1609 * to the given inode after locking it, and returns the dquots for
1610 * the uid and gid (or prid; a project dquot uses the gdquot slot).
1611 *
1612 * in   : inode (unlocked)
1613 * out  : udquot, gdquot with references taken and unlocked
1614 */
1615int
1616xfs_qm_vop_dqalloc(
1617        struct xfs_inode        *ip,
1618        uid_t                   uid,
1619        gid_t                   gid,
1620        prid_t                  prid,
1621        uint                    flags,
1622        struct xfs_dquot        **O_udqpp,
1623        struct xfs_dquot        **O_gdqpp)
1624{
1625        struct xfs_mount        *mp = ip->i_mount;
1626        struct xfs_dquot        *uq, *gq;
1627        int                     error;
1628        uint                    lockflags;
1629
1630        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1631                return 0;
1632
1633        lockflags = XFS_ILOCK_EXCL;
1634        xfs_ilock(ip, lockflags);
1635
1636        if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1637                gid = ip->i_d.di_gid;
1638
1639        /*
1640         * Attach the dquot(s) to this inode, doing a dquot allocation
1641         * if necessary. The dquot(s) will not be locked.
1642         */
1643        if (XFS_NOT_DQATTACHED(mp, ip)) {
1644                error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1645                if (error) {
1646                        xfs_iunlock(ip, lockflags);
1647                        return error;
1648                }
1649        }
1650
1651        uq = gq = NULL;
1652        if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1653                if (ip->i_d.di_uid != uid) {
1654                        /*
1655                         * What we need is the dquot that has this uid; if
1656                         * we passed the inode to dqget, the inode's uid
1657                         * would take priority over the uid argument. Since
1658                         * we are not passing the inode, we must unlock it
1659                         * here before calling dqget, because otherwise we
1660                         * would deadlock by doing trans_reserve while
1661                         * holding the ilock.
1662                         */
1663                        xfs_iunlock(ip, lockflags);
1664                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
1665                                                 XFS_DQ_USER,
1666                                                 XFS_QMOPT_DQALLOC |
1667                                                 XFS_QMOPT_DOWARN,
1668                                                 &uq))) {
1669                                ASSERT(error != ENOENT);
1670                                return error;
1671                        }
1672                        /*
1673                         * Get the ilock in the right order.
1674                         */
1675                        xfs_dqunlock(uq);
1676                        lockflags = XFS_ILOCK_SHARED;
1677                        xfs_ilock(ip, lockflags);
1678                } else {
1679                        /*
1680                         * Take an extra reference, because we'll return
1681                         * this to the caller.
1682                         */
1683                        ASSERT(ip->i_udquot);
1684                        uq = xfs_qm_dqhold(ip->i_udquot);
1685                }
1686        }
1687        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1688                if (ip->i_d.di_gid != gid) {
1689                        xfs_iunlock(ip, lockflags);
1690                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
1691                                                 XFS_DQ_GROUP,
1692                                                 XFS_QMOPT_DQALLOC |
1693                                                 XFS_QMOPT_DOWARN,
1694                                                 &gq))) {
1695                                if (uq)
1696                                        xfs_qm_dqrele(uq);
1697                                ASSERT(error != ENOENT);
1698                                return error;
1699                        }
1700                        xfs_dqunlock(gq);
1701                        lockflags = XFS_ILOCK_SHARED;
1702                        xfs_ilock(ip, lockflags);
1703                } else {
1704                        ASSERT(ip->i_gdquot);
1705                        gq = xfs_qm_dqhold(ip->i_gdquot);
1706                }
1707        } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1708                if (xfs_get_projid(ip) != prid) {
1709                        xfs_iunlock(ip, lockflags);
1710                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1711                                                 XFS_DQ_PROJ,
1712                                                 XFS_QMOPT_DQALLOC |
1713                                                 XFS_QMOPT_DOWARN,
1714                                                 &gq))) {
1715                                if (uq)
1716                                        xfs_qm_dqrele(uq);
1717                                ASSERT(error != ENOENT);
1718                                return error;
1719                        }
1720                        xfs_dqunlock(gq);
1721                        lockflags = XFS_ILOCK_SHARED;
1722                        xfs_ilock(ip, lockflags);
1723                } else {
1724                        ASSERT(ip->i_gdquot);
1725                        gq = xfs_qm_dqhold(ip->i_gdquot);
1726                }
1727        }
1728        if (uq)
1729                trace_xfs_dquot_dqalloc(ip);
1730
1731        xfs_iunlock(ip, lockflags);
1732        if (O_udqpp)
1733                *O_udqpp = uq;
1734        else if (uq)
1735                xfs_qm_dqrele(uq);
1736        if (O_gdqpp)
1737                *O_gdqpp = gq;
1738        else if (gq)
1739                xfs_qm_dqrele(gq);
1740        return 0;
1741}
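
/*
 * Illustrative caller sketch (assumed shape, modeled on the inode create
 * path): before starting the create transaction, allocate and attach the
 * dquots for the new file's owner, then release them after the commit:
 *
 *	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(),
 *				   prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp);
 *	...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 */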
1742
1743/*
1744 * Actually transfer ownership, and do the dquot modifications. The
1745 * quota reservations for these were already taken by the caller.
1746 */
1747xfs_dquot_t *
1748xfs_qm_vop_chown(
1749        xfs_trans_t     *tp,
1750        xfs_inode_t     *ip,
1751        xfs_dquot_t     **IO_olddq,
1752        xfs_dquot_t     *newdq)
1753{
1754        xfs_dquot_t     *prevdq;
1755        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
1756                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1757
1758
1759        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1760        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1761
1762        /* old dquot */
1763        prevdq = *IO_olddq;
1764        ASSERT(prevdq);
1765        ASSERT(prevdq != newdq);
1766
1767        xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1768        xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1769
1770        /* the sparkling new dquot */
1771        xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1772        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1773
1774        /*
1775         * Take an extra reference, because the inode is going to keep
1776         * this dquot pointer even after the trans_commit.
1777         */
1778        *IO_olddq = xfs_qm_dqhold(newdq);
1779
1780        return prevdq;
1781}
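
/*
 * Illustrative caller sketch (assumed, modeled on the setattr path): the
 * returned old dquot still carries the reference the inode used to own,
 * so the caller must drop it once the transaction has committed:
 *
 *	olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	xfs_qm_dqrele(olddquot);
 */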
1782
1783/*
1784 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1785 */
1786int
1787xfs_qm_vop_chown_reserve(
1788        xfs_trans_t     *tp,
1789        xfs_inode_t     *ip,
1790        xfs_dquot_t     *udqp,
1791        xfs_dquot_t     *gdqp,
1792        uint            flags)
1793{
1794        xfs_mount_t     *mp = ip->i_mount;
1795        uint            delblks, blkflags, prjflags = 0;
1796        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
1797        int             error;
1798
1799
1800        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1801        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1802
1803        delblks = ip->i_delayed_blks;
1804        delblksudq = delblksgdq = unresudq = unresgdq = NULL;
1805        blkflags = XFS_IS_REALTIME_INODE(ip) ?
1806                        XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1807
1808        if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1809            ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
1810                delblksudq = udqp;
1811                /*
1812                 * If there are delayed allocation blocks, then we have to
1813                 * unreserve those from the old dquot, and add them to the
1814                 * new dquot.
1815                 */
1816                if (delblks) {
1817                        ASSERT(ip->i_udquot);
1818                        unresudq = ip->i_udquot;
1819                }
1820        }
1821        if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
1822                if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
1823                     xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
1824                        prjflags = XFS_QMOPT_ENOSPC;
1825
1826                if (prjflags ||
1827                    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
1828                     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
1829                        delblksgdq = gdqp;
1830                        if (delblks) {
1831                                ASSERT(ip->i_gdquot);
1832                                unresgdq = ip->i_gdquot;
1833                        }
1834                }
1835        }
1836
1837        if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1838                                delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
1839                                flags | blkflags | prjflags)))
1840                return error;
1841
1842        /*
1843         * Do the delayed blks reservations/unreservations now. Since these
1844         * are done without the help of a transaction, if a reservation
1845         * fails, its previous reservations won't be automatically undone
1846         * by the transaction code, so we have to undo them manually here.
1847         */
1848        if (delblks) {
1849                /*
1850                 * Do the reservations first. Unreservation can't fail.
1851                 */
1852                ASSERT(delblksudq || delblksgdq);
1853                ASSERT(unresudq || unresgdq);
1854                if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1855                                delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
1856                                flags | blkflags | prjflags)))
1857                        return error;
1858                xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1859                                unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
1860                                blkflags);
1861        }
1862
1863        return 0;
1864}
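
/*
 * Illustrative summary of the delayed-block shuffle above (prose, not
 * code): outside of any transaction, the function first reserves
 * +delblks against the new dquots (delblksudq/delblksgdq) and only then
 * unreserves -delblks from the old ones (unresudq/unresgdq), because the
 * reservation step can fail with EDQUOT while unreservation cannot fail.
 */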
1865
1866int
1867xfs_qm_vop_rename_dqattach(
1868        struct xfs_inode        **i_tab)
1869{
1870        struct xfs_mount        *mp = i_tab[0]->i_mount;
1871        int                     i;
1872
1873        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1874                return 0;
1875
1876        for (i = 0; (i < 4 && i_tab[i]); i++) {
1877                struct xfs_inode        *ip = i_tab[i];
1878                int                     error;
1879
1880                /*
1881                 * Watch out for duplicate entries in the table.
1882                 */
1883                if (i == 0 || ip != i_tab[i-1]) {
1884                        if (XFS_NOT_DQATTACHED(mp, ip)) {
1885                                error = xfs_qm_dqattach(ip, 0);
1886                                if (error)
1887                                        return error;
1888                        }
1889                }
1890        }
1891        return 0;
1892}
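
/*
 * Illustrative caller sketch (assumed, modeled on the rename path): the
 * up-to-four inodes involved in a rename are collected into a table,
 * with unused slots left NULL, before locking, e.g.:
 *
 *	struct xfs_inode *inodes[4] = { src_dp, target_dp, src_ip, target_ip };
 *
 *	error = xfs_qm_vop_rename_dqattach(inodes);
 */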
1893
1894void
1895xfs_qm_vop_create_dqattach(
1896        struct xfs_trans        *tp,
1897        struct xfs_inode        *ip,
1898        struct xfs_dquot        *udqp,
1899        struct xfs_dquot        *gdqp)
1900{
1901        struct xfs_mount        *mp = tp->t_mountp;
1902
1903        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1904                return;
1905
1906        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1907        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1908
1909        if (udqp) {
1910                ASSERT(ip->i_udquot == NULL);
1911                ASSERT(XFS_IS_UQUOTA_ON(mp));
1912                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1913
1914                ip->i_udquot = xfs_qm_dqhold(udqp);
1915                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1916        }
1917        if (gdqp) {
1918                ASSERT(ip->i_gdquot == NULL);
1919                ASSERT(XFS_IS_OQUOTA_ON(mp));
1920                ASSERT((XFS_IS_GQUOTA_ON(mp) ?
1921                        ip->i_d.di_gid : xfs_get_projid(ip)) ==
1922                                be32_to_cpu(gdqp->q_core.d_id));
1923
1924                ip->i_gdquot = xfs_qm_dqhold(gdqp);
1925                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1926        }
1927}
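
/*
 * Illustrative caller sketch (assumed, modeled on the create path): once
 * the new inode has been allocated inside the transaction, the dquots
 * obtained earlier from xfs_qm_vop_dqalloc() are attached to it before
 * the transaction commits:
 *
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */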
1928
1929