linux/fs/xfs/xfs_trans_ail.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *prev_lip;

        if (list_empty(&ailp->xa_ail))
                return;

        /*
         * Check the next and previous entries are valid.
         */
        ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
        prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

        prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
        /*
         * Walk the list checking lsn ordering, and that every entry has the
         * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
         * when specifically debugging the transaction subsystem.
         */
        prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (&prev_lip->li_ail != &ailp->xa_ail)
                        ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
                ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
                prev_lip = lip;
        }
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
xfs_log_item_t *
xfs_ail_min(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        if (lip->li_ail.next == &ailp->xa_ail)
                return NULL;

        return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
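
/*
 * Example (illustrative only, not a caller in this file): a simple walk with
 * the helpers above must hold the AIL lock so the list cannot change under
 * us; inspect_item() is a hypothetical stand-in for per-item work:
 *
 *        spin_lock(&ailp->xa_lock);
 *        for (lip = xfs_ail_min(ailp); lip; lip = xfs_ail_next(ailp, lip))
 *                inspect_item(lip);
 *        spin_unlock(&ailp->xa_lock);
 *
 * Traversals that may drop the lock must use the cursor API below instead.
 */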

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}
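
/*
 * Relationship to the log tail (a simplified sketch of the log manager's
 * use, not code copied from it): the minimum AIL LSN is taken as the log
 * tail, and an empty AIL means the tail has caught up with the head:
 *
 *        tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 *        if (!tail_lsn)
 *                tail_lsn = head_lsn;    (assumed name for the log head)
 *
 * Hence moving the AIL minimum forward is what frees log space.
 */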

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_max(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_add_tail(&cur->list, &ailp->xa_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit of the item pointer being set),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        struct xfs_log_item     *lip = cur->item;

        if ((__psint_t)lip & 1)
                lip = xfs_ail_min(ailp);
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_del_init(&cur->list);
}
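
/*
 * Cursor lifecycle sketch (hypothetical caller, modelled on xfsaild_push()
 * below).  The cursor remains safe to use across drops of the AIL lock
 * because deletion invalidates it rather than leaving it dangling:
 *
 *        struct xfs_ail_cursor   cur;
 *        struct xfs_log_item     *lip;
 *
 *        spin_lock(&ailp->xa_lock);
 *        lip = xfs_trans_ail_cursor_first(ailp, &cur, start_lsn);
 *        while (lip) {
 *                ... push or inspect the item; the lock may be dropped ...
 *                lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *        }
 *        xfs_trans_ail_cursor_done(ailp, &cur);
 *        spin_unlock(&ailp->xa_lock);
 */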

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail_cursor   *cur;

        list_for_each_entry(cur, &ailp->xa_cursors, list) {
                if (cur->item == lip)
                        cur->item = (struct xfs_log_item *)
                                        ((__psint_t)cur->item | 1);
        }
}
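
/*
 * Tagging example (illustrative, taken directly from the code above and
 * from xfs_trans_ail_cursor_next()): log item pointers are at least word
 * aligned, so bit 0 is free to carry the "invalidated" state:
 *
 *        cur->item = (struct xfs_log_item *)((__psint_t)lip | 1);
 *
 * and the traversal code detects it with:
 *
 *        if ((__psint_t)lip & 1)
 *                lip = xfs_ail_min(ailp);        (restart the traversal)
 */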

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        xfs_trans_ail_cursor_init(ailp, cur);

        if (lsn == 0) {
                lip = xfs_ail_min(ailp);
                goto out;
        }

        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
                        goto out;
        }
        return NULL;

out:
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
                        return lip;
        }
        return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_trans_ail_cursor_init(ailp, cur);
        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
        return cur->item;
}
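
/*
 * Intended use of the last-item cursor (a sketch; the caller is log
 * recovery, not this file): repeated insertions at the same or increasing
 * LSNs can reuse one cursor so each insert avoids rescanning the list:
 *
 *        xfs_trans_ail_cursor_last(ailp, &cur, lsn);
 *        xfs_trans_ail_update_bulk(ailp, &cur, items, nr, lsn);
 *
 * Note that xfs_trans_ail_update_bulk() drops the AIL lock, so it must be
 * retaken before calling xfs_trans_ail_cursor_done().
 */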

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct list_head        *list,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        ASSERT(!list_empty(list));

        /*
         * Use the cursor to determine the insertion point if one is
         * provided.  If not, or if the one we got is not valid,
         * find the place in the AIL where the items belong.
         */
        lip = cur ? cur->item : NULL;
        if (!lip || (__psint_t)lip & 1)
                lip = __xfs_trans_ail_cursor_last(ailp, lsn);

        /*
         * If a cursor is provided, we know we're processing the AIL
         * in lsn order, and future items to be spliced in will
         * follow the last one being inserted now.  Update the
         * cursor to point to that last item, now while we have a
         * reliable pointer to it.
         */
        if (cur)
                cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

        /*
         * Finally perform the splice.  Unless the AIL was empty,
         * lip points to the item in the AIL _after_ which the new
         * items should go.  If lip is null the AIL was empty, so
         * the new items go at the head of the AIL.
         */
        if (lip)
                list_splice(list, &lip->li_ail);
        else
                list_splice(list, &ailp->xa_ail);
}
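
/*
 * Worked example: with AIL LSNs [10, 20, 20, 30] and a list spliced at an
 * lsn of 20, __xfs_trans_ail_cursor_last() returns the second 20 entry, so
 * the new items land between it and the 30 entry.  Splicing after the last
 * matching item is what preserves insertion order within an LSN for push
 * traversals.
 */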

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_ail_check(ailp, lip);
        list_del(&lip->li_ail);
        xfs_trans_ail_cursor_clear(ailp, lip);
}

static long
xfsaild_push(
        struct xfs_ail          *ailp)
{
        xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   cur;
        xfs_log_item_t          *lip;
        xfs_lsn_t               lsn;
        xfs_lsn_t               target;
        long                    tout;
        int                     stuck = 0;
        int                     flushing = 0;
        int                     count = 0;

        /*
         * If we encountered pinned items or did not finish writing out all
         * buffers the last time we ran, force the log first and wait for it
         * before pushing again.
         */
        if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
            (!list_empty_careful(&ailp->xa_buf_list) ||
             xfs_ail_min_lsn(ailp))) {
                ailp->xa_log_flush = 0;

                XFS_STATS_INC(xs_push_ail_flush);
                xfs_log_force(mp, XFS_LOG_SYNC);
        }

        spin_lock(&ailp->xa_lock);

        /* barrier matches the xa_target update in xfs_ail_push() */
        smp_rmb();
        target = ailp->xa_target;
        ailp->xa_target_prev = target;

        lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
        if (!lip) {
                /*
                 * If the AIL is empty or our push has reached the end we are
                 * done now.
                 */
                xfs_trans_ail_cursor_done(ailp, &cur);
                spin_unlock(&ailp->xa_lock);
                goto out_done;
        }

        XFS_STATS_INC(xs_push_ail);

        lsn = lip->li_lsn;
        while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;

                /*
                 * Note that IOP_PUSH may unlock and reacquire the AIL lock.  We
                 * rely on the AIL cursor implementation to be able to deal with
                 * the dropped lock.
                 */
                lock_result = IOP_PUSH(lip, &ailp->xa_buf_list);
                switch (lock_result) {
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        trace_xfs_ail_push(lip);

                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_FLUSHING:
                        /*
                         * The item or its backing buffer is already being
                         * flushed.  The typical reason for that is that an
                         * inode buffer is locked because we already pushed the
                         * updates to it as part of inode clustering.
                         *
                         * We do not want to stop flushing just because lots
                         * of items are already being flushed, but we need to
                         * re-try the flushing relatively soon if most of the
                         * AIL is being flushed.
                         */
                        XFS_STATS_INC(xs_push_ail_flushing);
                        trace_xfs_ail_flushing(lip);

                        flushing++;
                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_PINNED:
                        XFS_STATS_INC(xs_push_ail_pinned);
                        trace_xfs_ail_pinned(lip);

                        stuck++;
                        ailp->xa_log_flush++;
                        break;
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
                        trace_xfs_ail_locked(lip);

                        stuck++;
                        break;
                default:
                        ASSERT(0);
                        break;
                }

                count++;

                /*
                 * Are there too many items we can't do anything with?
                 *
                 * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * give them time to complete whatever operation is being
                 * done. i.e. remove pressure from the AIL while we can't make
                 * progress so traversals don't slow down further inserts and
                 * removals to/from the AIL.
                 *
                 * The value of 100 is an arbitrary magic number based on
                 * observation.
                 */
                if (stuck > 100)
                        break;

                lip = xfs_trans_ail_cursor_next(ailp, &cur);
                if (lip == NULL)
                        break;
                lsn = lip->li_lsn;
        }
        xfs_trans_ail_cursor_done(ailp, &cur);
        spin_unlock(&ailp->xa_lock);

        if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
                ailp->xa_log_flush++;

        if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
                /*
                 * We reached the target or the AIL is empty, so wait a bit
                 * longer for I/O to complete and remove pushed items from the
                 * AIL before we start the next scan from the start of the AIL.
                 */
                tout = 50;
                ailp->xa_last_pushed_lsn = 0;
        } else if (((stuck + flushing) * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we are
                 * stuck due to operations in progress. "Stuck" in this case
                 * is defined as >90% of the items we tried to push were stuck.
                 *
                 * Backoff a bit more to allow some I/O to complete before
                 * restarting from the start of the AIL. This prevents us from
                 * spinning on the same items, and if they are pinned it allows
                 * the restart to issue a log force to unpin the stuck items.
                 */
                tout = 20;
                ailp->xa_last_pushed_lsn = 0;
        } else {
                /*
                 * Assume we have more work to do in a short while.
                 */
                tout = 10;
        }

        return tout;
}
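
/*
 * Summary of the backoff policy above (derived from the code): 50ms when the
 * target was reached or the AIL is empty, 20ms when more than 90% of the
 * pushed items were stuck or flushing, 10ms otherwise.  xfsaild() below
 * sleeps killably for the short (<= 20ms) backoffs and interruptibly for
 * the rest.
 */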

static int
xfsaild(
        void            *data)
{
        struct xfs_ail  *ailp = data;
        long            tout = 0;       /* milliseconds */

        current->flags |= PF_MEMALLOC;

        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
                        __set_current_state(TASK_KILLABLE);
                else
                        __set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&ailp->xa_lock);

                /*
                 * Idle if the AIL is empty and we are not racing with a target
                 * update. We check the AIL after we set the task to a sleep
                 * state to guarantee that we either catch an xa_target update
                 * or that a wake_up resets the state to TASK_RUNNING.
                 * Otherwise, we run the risk of sleeping indefinitely.
                 *
                 * The barrier matches the xa_target update in xfs_ail_push().
                 */
                smp_rmb();
                if (!xfs_ail_min(ailp) &&
                    ailp->xa_target == ailp->xa_target_prev) {
                        spin_unlock(&ailp->xa_lock);
                        schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->xa_lock);

                if (tout)
                        schedule_timeout(msecs_to_jiffies(tout));

                __set_current_state(TASK_RUNNING);

                try_to_freeze();

                tout = xfsaild_push(ailp);
        }

        return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild thread, which means the caller
 * needs to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, so we simply update
 * the target and wake the xfsaild if it is not already pushing far enough.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       threshold_lsn)
{
        xfs_log_item_t  *lip;

        lip = xfs_ail_min(ailp);
        if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
            XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
                return;

        /*
         * Ensure that the new target is noticed by the push code before we
         * wake the xfsaild; the barriers pair with the smp_rmb() calls in
         * xfsaild() and xfsaild_push().
         */
        smp_wmb();
        xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        smp_wmb();

        wake_up_process(ailp->xa_task);
}
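
/*
 * Caller-side sketch (hypothetical; modelled on the log grant code): when
 * log space runs short, compute how far the tail must move and push the AIL
 * up to that threshold:
 *
 *        xfs_lsn_t       threshold_lsn;
 *
 *        threshold_lsn = ...;    (computed from the required log space)
 *        xfs_ail_push(mp->m_ail, threshold_lsn);
 *
 * The call returns immediately; the actual pushing happens in xfsaild.
 */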

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

        if (threshold_lsn)
                xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
        struct xfs_ail  *ailp)
{
        struct xfs_log_item     *lip;
        DEFINE_WAIT(wait);

        spin_lock(&ailp->xa_lock);
        while ((lip = xfs_ail_max(ailp)) != NULL) {
                prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
                ailp->xa_target = lip->li_lsn;
                wake_up_process(ailp->xa_task);
                spin_unlock(&ailp->xa_lock);
                schedule();
                spin_lock(&ailp->xa_lock);
        }
        spin_unlock(&ailp->xa_lock);

        finish_wait(&ailp->xa_empty, &wait);
}
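
/*
 * Usage note (assumed from the callers, which are not in this file): this is
 * the unmount/quiesce path - e.g. xfs_ail_push_all_sync(mp->m_ail) - which
 * drains the AIL completely before the log is torn down.
 */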

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * The AIL lock is held across the update of all the items in the array: the
 * caller acquires it and this function drops it before returning. Once we
 * have the AIL lock, we need to check each log item LSN to confirm it needs
 * to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               lsn) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;
        LIST_HEAD(tmp);

        ASSERT(nr_items > 0);           /* Not required, but true. */
        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (lip->li_flags & XFS_LI_IN_AIL) {
                        /* check if we really need to move the item */
                        if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
                                continue;

                        xfs_ail_delete(ailp, lip);
                        if (mlip == lip)
                                mlip_changed = 1;
                } else {
                        lip->li_flags |= XFS_LI_IN_AIL;
                }
                lip->li_lsn = lsn;
                list_add(&lip->li_ail, &tmp);
        }

        if (!list_empty(&tmp))
                xfs_ail_splice(ailp, cur, &tmp, lsn);

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}
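
/*
 * For reference, single item insertion is a thin wrapper around the bulk
 * call; a sketch of the wrapper (the real one lives in xfs_trans_priv.h):
 *
 *        static inline void
 *        xfs_trans_ail_update(
 *                struct xfs_ail          *ailp,
 *                struct xfs_log_item     *lip,
 *                xfs_lsn_t               lsn) __releases(ailp->xa_lock)
 *        {
 *                xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 *        }
 */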

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        int                     shutdown_type) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;

        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (!(lip->li_flags & XFS_LI_IN_AIL)) {
                        struct xfs_mount        *mp = ailp->xa_mount;

                        spin_unlock(&ailp->xa_lock);
                        if (!XFS_FORCED_SHUTDOWN(mp)) {
                                xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
                "%s: attempting to delete a log item that is not in the AIL",
                                                __func__);
                                xfs_force_shutdown(mp, shutdown_type);
                        }
                        return;
                }

                xfs_ail_delete(ailp, lip);
                lip->li_flags &= ~XFS_LI_IN_AIL;
                lip->li_lsn = 0;
                if (mlip == lip)
                        mlip_changed = 1;
        }

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                if (list_empty(&ailp->xa_ail))
                        wake_up_all(&ailp->xa_empty);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}
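
/*
 * As with insertion, single item deletion is a thin wrapper; a sketch of the
 * wrapper (the real one lives in xfs_trans_priv.h):
 *
 *        static inline void
 *        xfs_trans_ail_delete(
 *                struct xfs_ail          *ailp,
 *                struct xfs_log_item     *lip,
 *                int                     shutdown_type)
 *                                        __releases(ailp->xa_lock)
 *        {
 *                xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
 *        }
 */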

int
xfs_trans_ail_init(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp;

        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
                return ENOMEM;

        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        INIT_LIST_HEAD(&ailp->xa_cursors);
        spin_lock_init(&ailp->xa_lock);
        INIT_LIST_HEAD(&ailp->xa_buf_list);
        init_waitqueue_head(&ailp->xa_empty);

        ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                        ailp->xa_mount->m_fsname);
        if (IS_ERR(ailp->xa_task))
                goto out_free_ailp;

        mp->m_ail = ailp;
        return 0;

out_free_ailp:
        kmem_free(ailp);
        return ENOMEM;
}
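
/*
 * Mount-time usage sketch (assumed, simplified): the AIL is created before
 * log recovery can run, and torn down again at unmount:
 *
 *        error = xfs_trans_ail_init(mp);
 *        if (error)
 *                return error;
 *        ...
 *        xfs_trans_ail_destroy(mp);
 */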

void
xfs_trans_ail_destroy(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp = mp->m_ail;

        kthread_stop(ailp->xa_task);
        kmem_free(ailp);
}