linux/fs/xfs/xfs_icache.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
                                struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;

        /*
         * if this didn't occur in transactions, we could use
         * KM_MAYFAIL and return NULL here on ENOMEM. Set the
         * code up to do this anyway.
         */
        ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
        if (!ip)
                return NULL;
        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_zone_free(xfs_inode_zone, ip);
                return NULL;
        }

        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);

        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

        return ip;
}

STATIC void
xfs_inode_free_callback(
        struct rcu_head         *head)
{
        struct inode            *inode = container_of(head, struct inode, i_rcu);
        struct xfs_inode        *ip = XFS_I(inode);

        kmem_zone_free(xfs_inode_zone, ip);
}

STATIC void
xfs_inode_free(
        struct xfs_inode        *ip)
{
        switch (ip->i_d.di_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
                break;
        }

        if (ip->i_afp)
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);

        if (ip->i_itemp) {
                ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. The ip->i_flags_lock provides the barrier against lookup
         * races.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip,
        xfs_ino_t               ino,
        int                     flags,
        int                     lock_flags) __releases(RCU)
{
        struct inode            *inode = VFS_I(ip);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * check for re-use of an inode within an RCU grace period due to the
         * radix tree nodes not being updated yet. We monitor for this by
         * setting the inode number to zero before freeing the inode structure.
         * If the inode has been reallocated and set up, then the inode number
         * will not match, so check for that, too.
         */
        spin_lock(&ip->i_flags_lock);
        if (ip->i_ino != ino) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(xs_ig_frecycle);
                error = EAGAIN;
                goto out_error;
        }

        /*
         * If we are racing with another cache hit that is currently
         * instantiating this inode or currently recycling it out of
         * reclaimable state, wait for the initialisation to complete
         * before continuing.
         *
         * XXX(hch): eventually we should do something equivalent to
         *           wait_on_inode to wait for these flags to be cleared
         *           instead of polling for it.
         */
        if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(xs_ig_frecycle);
                error = EAGAIN;
                goto out_error;
        }

        /*
         * If lookup is racing with unlink return an error immediately.
         */
        if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_error;
        }

        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
         * Need to carefully get it back into usable state.
         */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                trace_xfs_iget_reclaim(ip);

                /*
                 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
                 * from stomping over us while we recycle the inode.  We can't
                 * clear the radix tree reclaimable tag yet as it requires
                 * pag_ici_lock to be held exclusive.
                 */
                ip->i_flags |= XFS_IRECLAIM;

                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();

                error = -inode_init_always(mp->m_super, inode);
                if (error) {
                        /*
                         * Re-initializing the inode failed, and we are in deep
                         * trouble.  Try to re-add it to the reclaim list.
                         */
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);

                        ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
                        ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }

                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);

                /*
                 * Clear the per-lifetime state in the inode as we are now
                 * effectively a new inode and need to return to the initial
                 * state before reuse occurs.
                 */
                ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_NEW;

                ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
                mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

                spin_unlock(&ip->i_flags_lock);
                spin_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode)) {
                        trace_xfs_iget_skip(ip);
                        error = EAGAIN;
                        goto out_error;
                }

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                trace_xfs_iget_hit(ip);
        }

        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
        XFS_STATS_INC(xs_ig_found);

        return 0;

out_error:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        return error;
}

static int
xfs_iget_cache_miss(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        xfs_trans_t             *tp,
        xfs_ino_t               ino,
        struct xfs_inode        **ipp,
        int                     flags,
        int                     lock_flags)
{
        struct xfs_inode        *ip;
        int                     error;
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);
        int                     iflags;

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return ENOMEM;

        error = xfs_iread(mp, tp, ip, flags);
        if (error)
                goto out_destroy;

        trace_xfs_iget_miss(ip);

        if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_destroy;
        }

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region. Since we can be called from transaction context, don't
         * recurse into the file system.
         */
        if (radix_tree_preload(GFP_NOFS)) {
                error = EAGAIN;
                goto out_destroy;
        }

        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        /*
         * These values must be set before inserting the inode into the radix
         * tree as the moment it is inserted a concurrent lookup (allowed by the
         * RCU locking mechanism) can find it and that lookup must see that this
         * is an inode currently under construction (i.e. that XFS_INEW is set).
         * The ip->i_flags_lock that protects the XFS_INEW flag forms the
         * memory barrier that ensures this detection works correctly at lookup
         * time.
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
                iflags |= XFS_IDONTCACHE;
        ip->i_udquot = ip->i_gdquot = NULL;
        xfs_iflags_set(ip, iflags);

        /* insert the new inode */
        spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(xs_ig_dup);
                error = EAGAIN;
                goto out_preload_end;
        }
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        xfs_inode_t     *ip;
        int             error;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;

        /*
         * xfs_reclaim_inode() uses the ILOCK to ensure an inode
         * doesn't get freed while it's being referenced during a
         * radix tree traversal here.  It assumes this function
         * acquires only the ILOCK (and therefore it has no need to
         * involve the IOLOCK in this synchronization).
         */
        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return EINVAL;

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                XFS_STATS_INC(xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                                        flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
                xfs_setup_inode(ip);
        return 0;

out_error_or_again:
        if (error == EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}
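
/*
 * Example usage (illustrative sketch only, not part of the original file):
 * a caller outside transaction context that wants inode 'ino' with the
 * inode lock held exclusively would do something like:
 *
 *      struct xfs_inode        *ip;
 *      int                     error;
 *
 *      error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
 *      if (error)
 *              return error;
 *      ...
 *      xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *      IRELE(ip);
 *
 * Note that errors are returned as positive values here, as is the
 * historic XFS convention used throughout this file.
 */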

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH        32

STATIC int
xfs_inode_ag_walk_grab(
        struct xfs_inode        *ip)
{
        struct inode            *inode = VFS_I(ip);

        ASSERT(rcu_read_lock_held());

        /*
         * check for stale RCU freed inode
         *
         * If the inode has been reallocated, it doesn't matter if it's not in
         * the AG we are walking - we are walking for writeback, so if it
         * passes all the "valid inode" checks and is dirty, then we'll write
         * it back anyway.  If it has been reallocated and is still being
         * initialised, the XFS_INEW check below will catch it.
         */
        spin_lock(&ip->i_flags_lock);
        if (!ip->i_ino)
                goto out_unlock_noent;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
                goto out_unlock_noent;
        spin_unlock(&ip->i_flags_lock);

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return EFSCORRUPTED;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                return ENOENT;

        if (is_bad_inode(inode)) {
                IRELE(ip);
                return ENOENT;
        }

        /* inode is valid */
        return 0;

out_unlock_noent:
        spin_unlock(&ip->i_flags_lock);
        return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;
        int                     done;
        int                     nr_found;

restart:
        done = 0;
        skipped = 0;
        first_index = 0;
        nr_found = 0;
        do {
                struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                rcu_read_lock();

                if (tag == -1)
                        nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH);
                else
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **) batch, first_index,
                                        XFS_LOOKUP_BATCH, tag);

                if (!nr_found) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Grab the inodes before we drop the lock. If we found
                 * nothing, nr == 0 and the loop will be skipped.
                 */
                for (i = 0; i < nr_found; i++) {
                        struct xfs_inode *ip = batch[i];

                        if (done || xfs_inode_ag_walk_grab(ip))
                                batch[i] = NULL;

                        /*
                         * Update the index for the next lookup. Catch
                         * overflows into the next AG range which can occur if
                         * we have inodes in the last block of the AG and we
                         * are currently pointing to the last inode.
                         *
                         * Because we may see inodes that are from the wrong AG
                         * due to RCU freeing and reallocation, only update the
                         * index if it lies in this AG. It was a race that led
                         * us to see this inode, so another lookup from the
                         * same index will not find it again.
                         */
                        if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
                                continue;
                        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                        if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                done = 1;
                }

                /* unlock now we've grabbed the inodes. */
                rcu_read_unlock();

                for (i = 0; i < nr_found; i++) {
                        if (!batch[i])
                                continue;
                        error = execute(batch[i], pag, flags, args);
                        IRELE(batch[i]);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                /* bail out if the filesystem is corrupted. */
                if (error == EFSCORRUPTED)
                        break;

                cond_resched();

        } while (nr_found && !done);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'background_prealloc_discard_period' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
        struct xfs_mount *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
                queue_delayed_work(mp->m_eofblocks_workqueue,
                                   &mp->m_eofblocks_work,
                                   msecs_to_jiffies(xfs_eofb_secs * 1000));
        rcu_read_unlock();
}

void
xfs_eofblocks_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_eofblocks_work);
        xfs_icache_free_eofblocks(mp, NULL);
        xfs_queue_eofblocks(mp);
}

int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags,
                                           void *args),
        int                     flags,
        void                    *args)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
        }
        return XFS_ERROR(last_error);
}
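
/*
 * Illustrative sketch (not part of the original file): a minimal 'execute'
 * callback suitable for xfs_inode_ag_iterator(). The walker grabs a
 * reference to each inode before calling the callback and IRELE()s it
 * afterwards, so the callback must not release the inode itself.
 * Returning EAGAIN makes the walk count the inode as skipped and restart;
 * any other positive errno is collected as last_error. The callback name
 * and body below are hypothetical.
 *
 *      STATIC int
 *      xfs_example_execute(
 *              struct xfs_inode        *ip,
 *              struct xfs_perag        *pag,
 *              int                     flags,
 *              void                    *args)
 *      {
 *              return 0;
 *      }
 *
 *      error = xfs_inode_ag_iterator(mp, xfs_example_execute, 0, NULL);
 */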

int
xfs_inode_ag_iterator_tag(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
        }
        return XFS_ERROR(last_error);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
        struct xfs_mount        *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
                        msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
        rcu_read_unlock();
}
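
/*
 * Worked example of the interval above (per the comment on
 * xfs_reclaim_work_queue): xfs_syncd_centisecs defaults to 3000, i.e. the
 * 30s periodic sync interval, so the expression evaluates to
 *
 *      msecs_to_jiffies(3000 / 6 * 10) = msecs_to_jiffies(5000)
 *
 * and the reclaim work is therefore requeued every 5 seconds by default.
 */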

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                        struct xfs_mount, m_reclaim_work);

        xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
        xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);

        if (!pag->pag_ici_reclaimable) {
                /* propagate the reclaim tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* schedule periodic background inode reclaim */
                xfs_reclaim_work_queue(ip->i_mount);

                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
        pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        __xfs_inode_set_reclaim_tag(pag, ip);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
 786
 787STATIC void
 788__xfs_inode_clear_reclaim(
 789        xfs_perag_t     *pag,
 790        xfs_inode_t     *ip)
 791{
 792        pag->pag_ici_reclaimable--;
 793        if (!pag->pag_ici_reclaimable) {
 794                /* clear the reclaim tag from the perag radix tree */
 795                spin_lock(&ip->i_mount->m_perag_lock);
 796                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
 797                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
 798                                XFS_ICI_RECLAIM_TAG);
 799                spin_unlock(&ip->i_mount->m_perag_lock);
 800                trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
 801                                                        -1, _RET_IP_);
 802        }
 803}
 804
 805STATIC void
 806__xfs_inode_clear_reclaim_tag(
 807        xfs_mount_t     *mp,
 808        xfs_perag_t     *pag,
 809        xfs_inode_t     *ip)
 810{
 811        radix_tree_tag_clear(&pag->pag_ici_root,
 812                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
 813        __xfs_inode_clear_reclaim(pag, ip);
 814}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        ASSERT(rcu_read_lock_held());

        /* quick check for stale RCU freed inode */
        if (!ip->i_ino)
                return 1;

        /*
         * If we are asked for non-blocking operation, do unlocked checks to
         * see if the inode already is being flushed or in reclaim to avoid
         * lock traffic.
         */
        if ((flags & SYNC_TRYLOCK) &&
            __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
                return 1;

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode.  Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         *
         * Due to RCU lookup, we may find inodes that have been freed and only
         * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
         * aren't candidates for reclaim at all, so we must check that
         * XFS_IRECLAIMABLE is set before proceeding to reclaim.
         */
        spin_lock(&ip->i_flags_lock);
        if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
            __xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* not a reclaim candidate. */
                spin_unlock(&ip->i_flags_lock);
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *      inode state          iflush ret         required action
 *      ---------------      ----------         ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, async            -               requeue
 *      dirty, sync             0               reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, async   => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, async    => requeue
 *      dirty, sync     => flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        struct xfs_buf          *bp = NULL;
        int                     error;

restart:
        error = 0;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (is_bad_inode(VFS_I(ip)))
                goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                xfs_iflush_abort(ip, false);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out_ifunlock;
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE))
                goto reclaim;
        if (xfs_inode_clean(ip))
                goto reclaim;

        /*
         * Never flush out dirty data during non-blocking reclaim, as it would
         * just contend with AIL pushing trying to do the same job.
         */
        if (!(sync_mode & SYNC_WAIT))
                goto out_ifunlock;

        /*
         * Now we have an inode that needs flushing.
         *
         * Note that xfs_iflush will never block on the inode buffer lock, as
         * xfs_ifree_cluster() can lock the inode buffer before it locks the
         * ip->i_lock, and we are doing the exact opposite here.  As a result,
         * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
         * result in an ABBA deadlock with xfs_ifree_cluster().
         *
         * As xfs_ifree_cluster() must gather all inodes that are active in the
         * cache to mark them stale, if we hit this case we don't actually want
         * to do IO here - we want the inode marked stale so we can simply
         * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
         * inode, back off and try again.  Hopefully the next pass through will
         * see the stale flag set on the inode.
         */
        error = xfs_iflush(ip, &bp);
        if (error == EAGAIN) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                /* backoff longer than in xfs_ifree_cluster */
                delay(2);
                goto restart;
        }

        if (!error) {
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
        }

        xfs_iflock(ip);
reclaim:
        xfs_ifunlock(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree, assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        spin_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
                ASSERT(0);
        __xfs_inode_clear_reclaim(pag, ip);
        spin_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups.  This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_inode_free(ip);
        return error;

out_ifunlock:
        xfs_ifunlock(ip);
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * We could return EAGAIN here to make reclaim rescan the inode tree in
         * a short while. However, this just burns CPU time scanning the tree
         * waiting for IO to complete and the reclaim work never goes back to
         * the idle state. Instead, return 0 to let the next scheduled
         * background reclaim attempt to reclaim the inode again.
         */
        return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                done = 1;
                                rcu_read_unlock();
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. If we found
                         * nothing, nr == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block of
                                 * the AG and we are currently pointing to the
                                 * last inode.
                                 *
                                 * Because we may see inodes that are from the
                                 * wrong AG due to RCU freeing and
                                 * reallocation, only update the index if it
                                 * lies in this AG. It was a race that led us
                                 * to see this inode, so another lookup from
                                 * the same index will not find it again.
                                 */
                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now we've grabbed the inodes. */
                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag, flags);
                                if (error && last_error != EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                        cond_resched();

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * If we skipped any AG, and we still have scan count remaining, do
         * another pass this time using blocking reclaim semantics (i.e.
         * waiting on the reclaim locks and ignoring the reclaim cursors). This
         * ensures that when we get more reclaimers than AGs we block rather
         * than spin trying to execute reclaim.
         */
        if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
        xfs_mount_t     *mp,
        int             mode)
{
        int             nr_to_scan = INT_MAX;

        return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
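
/*
 * Illustrative note: the background worker above calls this with
 * SYNC_TRYLOCK; a caller that must reclaim everything (the unmount path
 * is the usual example, though that caller lives outside this file and is
 * an assumption here) would instead use blocking semantics:
 *
 *      xfs_reclaim_inodes(mp, SYNC_WAIT);
 */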

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
        struct xfs_mount        *mp,
        int                     nr_to_scan)
{
        /* kick background reclaimer and push the AIL */
        xfs_reclaim_work_queue(mp);
        xfs_ail_push_all(mp->m_ail);

        xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
        struct xfs_mount        *mp)
{
        struct xfs_perag        *pag;
        xfs_agnumber_t          ag = 0;
        int                     reclaimable = 0;

        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}

STATIC int
xfs_inode_match_id(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
{
        if (eofb->eof_flags & XFS_EOF_FLAGS_UID &&
            ip->i_d.di_uid != eofb->eof_uid)
                return 0;

        if (eofb->eof_flags & XFS_EOF_FLAGS_GID &&
            ip->i_d.di_gid != eofb->eof_gid)
                return 0;

        if (eofb->eof_flags & XFS_EOF_FLAGS_PRID &&
            xfs_get_projid(ip) != eofb->eof_prid)
                return 0;

        return 1;
}

STATIC int
xfs_inode_free_eofblocks(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags,
        void                    *args)
{
        int ret;
        struct xfs_eofblocks *eofb = args;

        if (!xfs_can_free_eofblocks(ip, false)) {
                /* inode could be preallocated or append-only */
                trace_xfs_inode_free_eofblocks_invalid(ip);
                xfs_inode_clear_eofblocks_tag(ip);
                return 0;
        }

        /*
         * If the mapping is dirty the operation can block and wait for some
         * time. Unless we are waiting, skip it.
         */
        if (!(flags & SYNC_WAIT) &&
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        if (eofb) {
                if (!xfs_inode_match_id(ip, eofb))
                        return 0;

                /* skip the inode if the file size is too small */
                if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
                    XFS_ISIZE(ip) < eofb->eof_min_file_size)
                        return 0;
        }

        ret = xfs_free_eofblocks(ip->i_mount, ip, true);

        /* don't revisit the inode if we're not waiting */
        if (ret == EAGAIN && !(flags & SYNC_WAIT))
                ret = 0;

        return ret;
}

int
xfs_icache_free_eofblocks(
        struct xfs_mount        *mp,
        struct xfs_eofblocks    *eofb)
{
        int flags = SYNC_TRYLOCK;

        if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
                flags = SYNC_WAIT;

        return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
                                         eofb, XFS_ICI_EOFBLOCKS_TAG);
}
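
/*
 * Illustrative sketch (not part of the original file): trimming post-EOF
 * blocks for all inodes owned by a given uid, waiting for completion.
 * 'uid' is a hypothetical variable supplied by the caller; the flag and
 * field names come from xfs_inode_match_id() above.
 *
 *      struct xfs_eofblocks    eofb;
 *
 *      memset(&eofb, 0, sizeof(eofb));
 *      eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC;
 *      eofb.eof_uid = uid;
 *      error = xfs_icache_free_eofblocks(mp, &eofb);
 */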

void
xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;
        int tagged;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_set_eofblocks_tag(ip);

        tagged = radix_tree_tagged(&pag->pag_ici_root,
                                   XFS_ICI_EOFBLOCKS_TAG);
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_EOFBLOCKS_TAG);
        if (!tagged) {
                /* propagate the eofblocks tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                   XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* kick off background trimming */
                xfs_queue_eofblocks(ip->i_mount);

                trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
                                              -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_clear_eofblocks_tag(ip);

        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                             XFS_ICI_EOFBLOCKS_TAG);
        if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
                /* clear the eofblocks tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                     XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
                                               -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}