linux/fs/xfs/xfs_trans_ail.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *prev_lip;

        if (list_empty(&ailp->xa_ail))
                return;

        /*
         * Check the next and previous entries are valid.
         */
        ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
        prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

        prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
xfs_log_item_t *
xfs_ail_min(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        if (lip->li_ail.next == &ailp->xa_ail)
                return NULL;

        return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_max(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_add_tail(&cur->list, &ailp->xa_cursors);
}

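/*
 * A minimal usage sketch of the cursor API (xfsaild_push() below is the
 * canonical user).  The AIL lock must be held when the cursor is
 * initialised and whenever it is advanced, though pushing code may drop
 * and reacquire the lock between steps:
 *
 *      struct xfs_ail_cursor   cur;
 *      struct xfs_log_item     *lip;
 *
 *      spin_lock(&ailp->xa_lock);
 *      lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *      while (lip != NULL) {
 *              ... examine or push lip here ...
 *              lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *      }
 *      xfs_trans_ail_cursor_done(ailp, &cur);
 *      spin_unlock(&ailp->xa_lock);
 */
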
/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit of the item pointer being set),
 * restart the traversal from the first item in the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        struct xfs_log_item     *lip = cur->item;

        if ((__psint_t)lip & 1)
                lip = xfs_ail_min(ailp);
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail_cursor   *cur;

        list_for_each_entry(cur, &ailp->xa_cursors, list) {
                if (cur->item == lip)
                        cur->item = (struct xfs_log_item *)
                                        ((__psint_t)cur->item | 1);
        }
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        xfs_trans_ail_cursor_init(ailp, cur);

        if (lsn == 0) {
                lip = xfs_ail_min(ailp);
                goto out;
        }

        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
                        goto out;
        }
        return NULL;

out:
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
                        return lip;
        }
        return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_trans_ail_cursor_init(ailp, cur);
        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
        return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct list_head        *list,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        ASSERT(!list_empty(list));

        /*
         * Use the cursor to determine the insertion point if one is
         * provided.  If not, or if the one we got is not valid,
         * find the place in the AIL where the items belong.
         */
        lip = cur ? cur->item : NULL;
        if (!lip || (__psint_t) lip & 1)
                lip = __xfs_trans_ail_cursor_last(ailp, lsn);

        /*
         * If a cursor is provided, we know we're processing the AIL
         * in lsn order, and future items to be spliced in will
         * follow the last one being inserted now.  Update the
         * cursor to point to that last item, now while we have a
         * reliable pointer to it.
         */
        if (cur)
                cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

        /*
         * Finally perform the splice.  Unless the AIL was empty,
         * lip points to the item in the AIL _after_ which the new
         * items should go.  If lip is null the AIL was empty, so
         * the new items go at the head of the AIL.
         */
        if (lip)
                list_splice(list, &lip->li_ail);
        else
                list_splice(list, &ailp->xa_ail);
}

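/*
 * A worked example of the splice semantics above: if the AIL holds items
 * at LSNs 10, 20 and 30 and @list holds new items at LSN 20, the lookup
 * finds the existing item at 20 and the new items are spliced in
 * immediately after it, giving 10, 20, 20', 20'', 30.  Items sharing an
 * LSN therefore stay in insertion order, which is what the push
 * traversal relies on.
 */
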
/*
 * Delete the given item from the AIL and invalidate any cursors that
 * point to it.
 */
static void
xfs_ail_delete(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_ail_check(ailp, lip);
        list_del(&lip->li_ail);
        xfs_trans_ail_cursor_clear(ailp, lip);
}

static long
xfsaild_push(
        struct xfs_ail          *ailp)
{
        xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   cur;
        xfs_log_item_t          *lip;
        xfs_lsn_t               lsn;
        xfs_lsn_t               target;
        long                    tout;
        int                     stuck = 0;
        int                     flushing = 0;
        int                     count = 0;

        /*
         * If we encountered pinned items or did not finish writing out all
         * buffers the last time we ran, force the log first and wait for it
         * before pushing again.
         */
        if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
            (!list_empty_careful(&ailp->xa_buf_list) ||
             xfs_ail_min_lsn(ailp))) {
                ailp->xa_log_flush = 0;

                XFS_STATS_INC(xs_push_ail_flush);
                xfs_log_force(mp, XFS_LOG_SYNC);
        }

        spin_lock(&ailp->xa_lock);

        /* barrier matches the xa_target update in xfs_ail_push() */
        smp_rmb();
        target = ailp->xa_target;
        ailp->xa_target_prev = target;

        lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
        if (!lip) {
                /*
                 * If the AIL is empty or our push has reached the end we are
                 * done now.
                 */
                xfs_trans_ail_cursor_done(ailp, &cur);
                spin_unlock(&ailp->xa_lock);
                goto out_done;
        }

        XFS_STATS_INC(xs_push_ail);

        lsn = lip->li_lsn;
        while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;

                /*
                 * Note that IOP_PUSH may unlock and reacquire the AIL lock.  We
                 * rely on the AIL cursor implementation to be able to deal with
                 * the dropped lock.
                 */
                lock_result = IOP_PUSH(lip, &ailp->xa_buf_list);
                switch (lock_result) {
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        trace_xfs_ail_push(lip);

                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_FLUSHING:
                        /*
                         * The item or its backing buffer is already being
                         * flushed.  The typical reason for that is that an
                         * inode buffer is locked because we already pushed the
                         * updates to it as part of inode clustering.
                         *
                         * We do not want to stop flushing just because lots
                         * of items are already being flushed, but we need to
                         * re-try the flushing relatively soon if most of the
                         * AIL is being flushed.
                         */
                        XFS_STATS_INC(xs_push_ail_flushing);
                        trace_xfs_ail_flushing(lip);

                        flushing++;
                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_PINNED:
                        XFS_STATS_INC(xs_push_ail_pinned);
                        trace_xfs_ail_pinned(lip);

                        stuck++;
                        ailp->xa_log_flush++;
                        break;
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
                        trace_xfs_ail_locked(lip);

                        stuck++;
                        break;
                default:
                        ASSERT(0);
                        break;
                }

                count++;

                /*
                 * Are there too many items we can't do anything with?
                 *
                 * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * give them time to complete whatever operation is being
                 * done. i.e. remove pressure from the AIL while we can't make
                 * progress so traversals don't slow down further inserts and
                 * removals to/from the AIL.
                 *
                 * The value of 100 is an arbitrary magic number based on
                 * observation.
                 */
                if (stuck > 100)
                        break;

                lip = xfs_trans_ail_cursor_next(ailp, &cur);
                if (lip == NULL)
                        break;
                lsn = lip->li_lsn;
        }
        xfs_trans_ail_cursor_done(ailp, &cur);
        spin_unlock(&ailp->xa_lock);

        if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
                ailp->xa_log_flush++;

        if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
                /*
                 * We reached the target or the AIL is empty, so wait a bit
                 * longer for I/O to complete and remove pushed items from the
                 * AIL before we start the next scan from the start of the AIL.
                 */
                tout = 50;
                ailp->xa_last_pushed_lsn = 0;
        } else if (((stuck + flushing) * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we are
                 * stuck due to operations in progress. "Stuck" in this case
                 * is defined as >90% of the items we tried to push were stuck.
                 *
                 * Backoff a bit more to allow some I/O to complete before
                 * restarting from the start of the AIL. This prevents us from
                 * spinning on the same items, and if they are pinned it will
                 * allow the restart to issue a log force to unpin the stuck
                 * items.
                 */
                tout = 20;
                ailp->xa_last_pushed_lsn = 0;
        } else {
                /*
                 * Assume we have more work to do in a short while.
                 */
                tout = 10;
        }

        return tout;
}

static int
xfsaild(
        void            *data)
{
        struct xfs_ail  *ailp = data;
        long            tout = 0;       /* milliseconds */

        current->flags |= PF_MEMALLOC;

        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
                        __set_current_state(TASK_KILLABLE);
                else
                        __set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&ailp->xa_lock);

                /*
                 * Idle if the AIL is empty and we are not racing with a target
                 * update. We check the AIL after we set the task to a sleep
                 * state to guarantee that we either catch an xa_target update
                 * or that a wake_up resets the state to TASK_RUNNING.
                 * Otherwise, we run the risk of sleeping indefinitely.
                 *
                 * The barrier matches the xa_target update in xfs_ail_push().
                 */
                smp_rmb();
                if (!xfs_ail_min(ailp) &&
                    ailp->xa_target == ailp->xa_target_prev) {
                        spin_unlock(&ailp->xa_lock);
                        schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->xa_lock);

                if (tout)
                        schedule_timeout(msecs_to_jiffies(tout));

                __set_current_state(TASK_RUNNING);

                try_to_freeze();

                tout = xfsaild_push(ailp);
        }

        return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild thread, which means the caller
 * needs to handle waiting on the async flush for space to become available.
 * We don't want to interrupt a push that is already in progress, hence we only
 * update the push target and wake the task.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       threshold_lsn)
{
        xfs_log_item_t  *lip;

        lip = xfs_ail_min(ailp);
        if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
            XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
                return;

        /*
         * Ensure that the new target is noticed by the push code before we
         * wake the xfsaild; these barriers match the smp_rmb() before the
         * xa_target reads in xfsaild() and xfsaild_push().
         */
        smp_wmb();
        xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        smp_wmb();

        wake_up_process(ailp->xa_task);
}

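/*
 * A sketch of the typical caller pattern: a log space waiter computes how
 * far the tail needs to move to free the space it wants, then kicks the
 * xfsaild and goes back to sleep on the log grant queues (see
 * xlog_grant_push_ail() in xfs_log.c for the real thing):
 *
 *      xfs_ail_push(mp->m_ail, threshold_lsn);
 *
 * The call returns immediately; the waiter is woken via
 * xfs_log_space_wake() as the tail of the log moves forward.
 */
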
/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

        if (threshold_lsn)
                xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
        struct xfs_ail  *ailp)
{
        struct xfs_log_item     *lip;
        DEFINE_WAIT(wait);

        spin_lock(&ailp->xa_lock);
        while ((lip = xfs_ail_max(ailp)) != NULL) {
                prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
                ailp->xa_target = lip->li_lsn;
                wake_up_process(ailp->xa_task);
                spin_unlock(&ailp->xa_lock);
                schedule();
                spin_lock(&ailp->xa_lock);
        }
        spin_unlock(&ailp->xa_lock);

        finish_wait(&ailp->xa_empty, &wait);
}

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single hold of the AIL lock, and as such must be called with the AIL
 * lock held.  The lock is dropped before returning.  Once we have the AIL
 * lock, we check each log item's LSN to confirm it actually needs to be moved
 * forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 */
void
xfs_trans_ail_update_bulk(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               lsn) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;
        LIST_HEAD(tmp);

        ASSERT(nr_items > 0);           /* Not required, but true. */
        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (lip->li_flags & XFS_LI_IN_AIL) {
                        /* check if we really need to move the item */
                        if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
                                continue;

                        xfs_ail_delete(ailp, lip);
                        if (mlip == lip)
                                mlip_changed = 1;
                } else {
                        lip->li_flags |= XFS_LI_IN_AIL;
                }
                lip->li_lsn = lsn;
                list_add(&lip->li_ail, &tmp);
        }

        if (!list_empty(&tmp))
                xfs_ail_splice(ailp, cur, &tmp, lsn);

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}

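/*
 * For the common single-item case, callers go through a trivial inline
 * wrapper (a sketch of the one declared in xfs_trans_priv.h) that passes
 * an array of one:
 *
 *      static inline void
 *      xfs_trans_ail_update(
 *              struct xfs_ail          *ailp,
 *              struct xfs_log_item     *lip,
 *              xfs_lsn_t               lsn) __releases(ailp->xa_lock)
 *      {
 *              xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 *      }
 */
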
/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        int                     shutdown_type) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;

        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (!(lip->li_flags & XFS_LI_IN_AIL)) {
                        struct xfs_mount        *mp = ailp->xa_mount;

                        spin_unlock(&ailp->xa_lock);
                        if (!XFS_FORCED_SHUTDOWN(mp)) {
                                xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
                "%s: attempting to delete a log item that is not in the AIL",
                                                __func__);
                                xfs_force_shutdown(mp, shutdown_type);
                        }
                        return;
                }

                xfs_ail_delete(ailp, lip);
                lip->li_flags &= ~XFS_LI_IN_AIL;
                lip->li_lsn = 0;
                if (mlip == lip)
                        mlip_changed = 1;
        }

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                if (list_empty(&ailp->xa_ail))
                        wake_up_all(&ailp->xa_empty);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}

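/*
 * As with updates, single-item deletion is a trivial inline wrapper (a
 * sketch of the one declared in xfs_trans_priv.h) around the bulk
 * operation:
 *
 *      static inline void
 *      xfs_trans_ail_delete(
 *              struct xfs_ail          *ailp,
 *              struct xfs_log_item     *lip,
 *              int                     shutdown_type)
 *                              __releases(ailp->xa_lock)
 *      {
 *              xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
 *      }
 */
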
int
xfs_trans_ail_init(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp;

        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
                return ENOMEM;

        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        INIT_LIST_HEAD(&ailp->xa_cursors);
        spin_lock_init(&ailp->xa_lock);
        INIT_LIST_HEAD(&ailp->xa_buf_list);
        init_waitqueue_head(&ailp->xa_empty);

        ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                        ailp->xa_mount->m_fsname);
        if (IS_ERR(ailp->xa_task))
                goto out_free_ailp;

        mp->m_ail = ailp;
        return 0;

out_free_ailp:
        kmem_free(ailp);
        return ENOMEM;
}

void
xfs_trans_ail_destroy(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp = mp->m_ail;

        kthread_stop(ailp->xa_task);
        kmem_free(ailp);
}