linux/block/cfq-iosched.c
   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/blkdev.h>
  11#include <linux/elevator.h>
  12#include <linux/rbtree.h>
  13#include <linux/ioprio.h>
  14#include <linux/blktrace_api.h>
  15
  16/*
  17 * tunables
  18 */
   19/* max requests dispatched from one queue in one round of service */
   20static const int cfq_quantum = 4;
  21static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  22/* maximum backwards seek, in KiB */
  23static const int cfq_back_max = 16 * 1024;
  24/* penalty of a backwards seek */
  25static const int cfq_back_penalty = 2;
  26static const int cfq_slice_sync = HZ / 10;
  27static int cfq_slice_async = HZ / 25;
  28static const int cfq_slice_async_rq = 2;
  29static int cfq_slice_idle = HZ / 125;
  30
  31/*
  32 * offset from end of service tree
  33 */
  34#define CFQ_IDLE_DELAY          (HZ / 5)
  35
  36/*
  37 * below this threshold, we consider thinktime immediate
  38 */
  39#define CFQ_MIN_TT              (2)
  40
  41#define CFQ_SLICE_SCALE         (5)
  42
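     /*
      * Per-request private pointers stash the cfq_io_context and cfq_queue
      * that a request belongs to.
      */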
  43#define RQ_CIC(rq)              \
  44        ((struct cfq_io_context *) (rq)->elevator_private)
  45#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
  46
  47static struct kmem_cache *cfq_pool;
  48static struct kmem_cache *cfq_ioc_pool;
  49
  50static DEFINE_PER_CPU(unsigned long, ioc_count);
  51static struct completion *ioc_gone;
  52static DEFINE_SPINLOCK(ioc_gone_lock);
  53
  54#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
  55#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  56#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  57
  58#define ASYNC                   (0)
  59#define SYNC                    (1)
  60
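     /*
      * Think time and seek statistics are only trusted once enough samples
      * have been collected.
      */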
  61#define sample_valid(samples)   ((samples) > 80)
  62
  63/*
  64 * Most of our rbtree usage is for sorting with min extraction, so
  65 * if we cache the leftmost node we don't have to walk down the tree
   66 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  67 * move this into the elevator for the rq sorting as well.
  68 */
  69struct cfq_rb_root {
  70        struct rb_root rb;
  71        struct rb_node *left;
  72};
  73#define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
  74
  75/*
  76 * Per block device queue structure
  77 */
  78struct cfq_data {
  79        struct request_queue *queue;
  80
  81        /*
  82         * rr list of queues with requests and the count of them
  83         */
  84        struct cfq_rb_root service_tree;
  85        unsigned int busy_queues;
  86
  87        int rq_in_driver;
  88        int sync_flight;
  89        int hw_tag;
  90
  91        /*
  92         * idle window management
  93         */
  94        struct timer_list idle_slice_timer;
  95        struct work_struct unplug_work;
  96
  97        struct cfq_queue *active_queue;
  98        struct cfq_io_context *active_cic;
  99
 100        /*
  101         * async queue for each priority class and level
 102         */
 103        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 104        struct cfq_queue *async_idle_cfqq;
 105
 106        sector_t last_position;
 107        unsigned long last_end_request;
 108
 109        /*
 110         * tunables, see top of file
 111         */
 112        unsigned int cfq_quantum;
 113        unsigned int cfq_fifo_expire[2];
 114        unsigned int cfq_back_penalty;
 115        unsigned int cfq_back_max;
 116        unsigned int cfq_slice[2];
 117        unsigned int cfq_slice_async_rq;
 118        unsigned int cfq_slice_idle;
 119
 120        struct list_head cic_list;
 121};
 122
 123/*
 124 * Per process-grouping structure
 125 */
 126struct cfq_queue {
 127        /* reference count */
 128        atomic_t ref;
 129        /* various state flags, see below */
 130        unsigned int flags;
 131        /* parent cfq_data */
 132        struct cfq_data *cfqd;
 133        /* service_tree member */
 134        struct rb_node rb_node;
 135        /* service_tree key */
 136        unsigned long rb_key;
 137        /* sorted list of pending requests */
 138        struct rb_root sort_list;
 139        /* if fifo isn't expired, next request to serve */
 140        struct request *next_rq;
 141        /* requests queued in sort_list */
 142        int queued[2];
 143        /* currently allocated requests */
 144        int allocated[2];
 145        /* fifo list of requests in sort_list */
 146        struct list_head fifo;
 147
 148        unsigned long slice_end;
 149        long slice_resid;
 150
 151        /* pending metadata requests */
 152        int meta_pending;
 153        /* number of requests that are on the dispatch list or inside driver */
 154        int dispatched;
 155
 156        /* io prio of this group */
 157        unsigned short ioprio, org_ioprio;
 158        unsigned short ioprio_class, org_ioprio_class;
 159
 160        pid_t pid;
 161};
 162
 163enum cfqq_state_flags {
 164        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
 165        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
 166        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
 167        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
 168        CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
 169        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
 170        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
 171        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
 172        CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
 173        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
 174        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 175};
 176
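     /*
      * Generate cfq_mark_cfqq_foo(), cfq_clear_cfqq_foo() and cfq_cfqq_foo()
      * helpers for setting, clearing and testing each state flag above.
      */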
 177#define CFQ_CFQQ_FNS(name)                                              \
 178static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
 179{                                                                       \
 180        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
 181}                                                                       \
 182static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
 183{                                                                       \
 184        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
 185}                                                                       \
 186static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
 187{                                                                       \
 188        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
 189}
 190
 191CFQ_CFQQ_FNS(on_rr);
 192CFQ_CFQQ_FNS(wait_request);
 193CFQ_CFQQ_FNS(must_alloc);
 194CFQ_CFQQ_FNS(must_alloc_slice);
 195CFQ_CFQQ_FNS(must_dispatch);
 196CFQ_CFQQ_FNS(fifo_expire);
 197CFQ_CFQQ_FNS(idle_window);
 198CFQ_CFQQ_FNS(prio_changed);
 199CFQ_CFQQ_FNS(queue_new);
 200CFQ_CFQQ_FNS(slice_new);
 201CFQ_CFQQ_FNS(sync);
 202#undef CFQ_CFQQ_FNS
 203
 204#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
 205        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 206#define cfq_log(cfqd, fmt, args...)     \
 207        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 208
 209static void cfq_dispatch_insert(struct request_queue *, struct request *);
 210static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
 211                                       struct io_context *, gfp_t);
 212static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 213                                                struct io_context *);
 214
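     /*
      * Each cfq_io_context tracks one async and one sync queue for the
      * device: index 0 is the async queue, index 1 the sync queue.
      */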
 215static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 216                                            int is_sync)
 217{
 218        return cic->cfqq[!!is_sync];
 219}
 220
 221static inline void cic_set_cfqq(struct cfq_io_context *cic,
 222                                struct cfq_queue *cfqq, int is_sync)
 223{
 224        cic->cfqq[!!is_sync] = cfqq;
 225}
 226
 227/*
  228 * We regard a request as SYNC if it's either a read or has the SYNC bit
  229 * set (in which case it could also be a direct WRITE).
 230 */
 231static inline int cfq_bio_sync(struct bio *bio)
 232{
 233        if (bio_data_dir(bio) == READ || bio_sync(bio))
 234                return 1;
 235
 236        return 0;
 237}
 238
 239/*
  240 * Schedule a run of the queue if there are requests pending and no one in the
 241 * driver that will restart queueing
 242 */
 243static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 244{
 245        if (cfqd->busy_queues) {
 246                cfq_log(cfqd, "schedule dispatch");
 247                kblockd_schedule_work(&cfqd->unplug_work);
 248        }
 249}
 250
 251static int cfq_queue_empty(struct request_queue *q)
 252{
 253        struct cfq_data *cfqd = q->elevator->elevator_data;
 254
 255        return !cfqd->busy_queues;
 256}
 257
 258/*
 259 * Scale schedule slice based on io priority. Use the sync time slice only
 260 * if a queue is marked sync and has sync io queued. A sync queue with async
  261 * io only should not get the full sync slice length.
 262 */
 263static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
 264                                 unsigned short prio)
 265{
 266        const int base_slice = cfqd->cfq_slice[sync];
 267
 268        WARN_ON(prio >= IOPRIO_BE_NR);
 269
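             /*
              * prio 4 gets exactly the base slice; every step of higher
              * priority (lower value) adds base_slice/CFQ_SLICE_SCALE,
              * every step lower subtracts it.
              */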
 270        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 271}
 272
 273static inline int
 274cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 275{
 276        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 277}
 278
 279static inline void
 280cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 281{
 282        cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
 283        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 284}
 285
 286/*
 287 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 288 * isn't valid until the first request from the dispatch is activated
 289 * and the slice time set.
 290 */
 291static inline int cfq_slice_used(struct cfq_queue *cfqq)
 292{
 293        if (cfq_cfqq_slice_new(cfqq))
 294                return 0;
 295        if (time_before(jiffies, cfqq->slice_end))
 296                return 0;
 297
 298        return 1;
 299}
 300
 301/*
  302 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 303 * We choose the request that is closest to the head right now. Distance
 304 * behind the head is penalized and only allowed to a certain extent.
 305 */
 306static struct request *
 307cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 308{
 309        sector_t last, s1, s2, d1 = 0, d2 = 0;
 310        unsigned long back_max;
 311#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
 312#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
 313        unsigned wrap = 0; /* bit mask: requests behind the disk head? */
 314
 315        if (rq1 == NULL || rq1 == rq2)
 316                return rq2;
 317        if (rq2 == NULL)
 318                return rq1;
 319
 320        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
 321                return rq1;
 322        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 323                return rq2;
 324        if (rq_is_meta(rq1) && !rq_is_meta(rq2))
 325                return rq1;
 326        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
 327                return rq2;
 328
 329        s1 = rq1->sector;
 330        s2 = rq2->sector;
 331
 332        last = cfqd->last_position;
 333
 334        /*
 335         * by definition, 1KiB is 2 sectors
 336         */
 337        back_max = cfqd->cfq_back_max * 2;
 338
 339        /*
 340         * Strict one way elevator _except_ in the case where we allow
 341         * short backward seeks which are biased as twice the cost of a
 342         * similar forward seek.
 343         */
 344        if (s1 >= last)
 345                d1 = s1 - last;
 346        else if (s1 + back_max >= last)
 347                d1 = (last - s1) * cfqd->cfq_back_penalty;
 348        else
 349                wrap |= CFQ_RQ1_WRAP;
 350
 351        if (s2 >= last)
 352                d2 = s2 - last;
 353        else if (s2 + back_max >= last)
 354                d2 = (last - s2) * cfqd->cfq_back_penalty;
 355        else
 356                wrap |= CFQ_RQ2_WRAP;
 357
 358        /* Found required data */
 359
 360        /*
 361         * By doing switch() on the bit mask "wrap" we avoid having to
 362         * check two variables for all permutations: --> faster!
 363         */
 364        switch (wrap) {
 365        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
 366                if (d1 < d2)
 367                        return rq1;
 368                else if (d2 < d1)
 369                        return rq2;
 370                else {
 371                        if (s1 >= s2)
 372                                return rq1;
 373                        else
 374                                return rq2;
 375                }
 376
 377        case CFQ_RQ2_WRAP:
 378                return rq1;
 379        case CFQ_RQ1_WRAP:
 380                return rq2;
 381        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
 382        default:
 383                /*
 384                 * Since both rqs are wrapped,
 385                 * start with the one that's further behind head
 386                 * (--> only *one* back seek required),
 387                 * since back seek takes more time than forward.
 388                 */
 389                if (s1 <= s2)
 390                        return rq1;
 391                else
 392                        return rq2;
 393        }
 394}
 395
 396/*
  397 * Below is the leftmost-node cache addon for the rbtree
 398 */
 399static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 400{
 401        if (!root->left)
 402                root->left = rb_first(&root->rb);
 403
 404        if (root->left)
 405                return rb_entry(root->left, struct cfq_queue, rb_node);
 406
 407        return NULL;
 408}
 409
 410static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 411{
 412        if (root->left == n)
 413                root->left = NULL;
 414
 415        rb_erase(n, &root->rb);
 416        RB_CLEAR_NODE(n);
 417}
 418
 419/*
 420 * would be nice to take fifo expire time into account as well
 421 */
 422static struct request *
 423cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 424                  struct request *last)
 425{
 426        struct rb_node *rbnext = rb_next(&last->rb_node);
 427        struct rb_node *rbprev = rb_prev(&last->rb_node);
 428        struct request *next = NULL, *prev = NULL;
 429
 430        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 431
 432        if (rbprev)
 433                prev = rb_entry_rq(rbprev);
 434
 435        if (rbnext)
 436                next = rb_entry_rq(rbnext);
 437        else {
 438                rbnext = rb_first(&cfqq->sort_list);
 439                if (rbnext && rbnext != &last->rb_node)
 440                        next = rb_entry_rq(rbnext);
 441        }
 442
 443        return cfq_choose_req(cfqd, next, prev);
 444}
 445
 446static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 447                                      struct cfq_queue *cfqq)
 448{
 449        /*
  450         * just an approximation of where this queue belongs, should be ok.
 451         */
 452        return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
 453                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
 454}
 455
 456/*
 457 * The cfqd->service_tree holds all pending cfq_queue's that have
 458 * requests waiting to be processed. It is sorted in the order that
 459 * we will service the queues.
 460 */
 461static void cfq_service_tree_add(struct cfq_data *cfqd,
 462                                    struct cfq_queue *cfqq, int add_front)
 463{
 464        struct rb_node **p, *parent;
 465        struct cfq_queue *__cfqq;
 466        unsigned long rb_key;
 467        int left;
 468
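             /*
              * Idle class queues are keyed CFQ_IDLE_DELAY beyond the last
              * entry (or beyond now) so they always sort to the back of the
              * tree; add_front requeues use key 0, everyone else is keyed on
              * jiffies plus a priority offset plus any unused slice residue.
              */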
 469        if (cfq_class_idle(cfqq)) {
 470                rb_key = CFQ_IDLE_DELAY;
 471                parent = rb_last(&cfqd->service_tree.rb);
 472                if (parent && parent != &cfqq->rb_node) {
 473                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 474                        rb_key += __cfqq->rb_key;
 475                } else
 476                        rb_key += jiffies;
 477        } else if (!add_front) {
 478                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 479                rb_key += cfqq->slice_resid;
 480                cfqq->slice_resid = 0;
 481        } else
 482                rb_key = 0;
 483
 484        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 485                /*
 486                 * same position, nothing more to do
 487                 */
 488                if (rb_key == cfqq->rb_key)
 489                        return;
 490
 491                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 492        }
 493
 494        left = 1;
 495        parent = NULL;
 496        p = &cfqd->service_tree.rb.rb_node;
 497        while (*p) {
 498                struct rb_node **n;
 499
 500                parent = *p;
 501                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 502
 503                /*
 504                 * sort RT queues first, we always want to give
  505                 * preference to them. IDLE queues go to the back.
  506                 * After that, sort on the next service time.
 507                 */
 508                if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
 509                        n = &(*p)->rb_left;
 510                else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
 511                        n = &(*p)->rb_right;
 512                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
 513                        n = &(*p)->rb_left;
 514                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 515                        n = &(*p)->rb_right;
 516                else if (rb_key < __cfqq->rb_key)
 517                        n = &(*p)->rb_left;
 518                else
 519                        n = &(*p)->rb_right;
 520
 521                if (n == &(*p)->rb_right)
 522                        left = 0;
 523
 524                p = n;
 525        }
 526
 527        if (left)
 528                cfqd->service_tree.left = &cfqq->rb_node;
 529
 530        cfqq->rb_key = rb_key;
 531        rb_link_node(&cfqq->rb_node, parent, p);
 532        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 533}
 534
 535/*
 536 * Update cfqq's position in the service tree.
 537 */
 538static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 539{
 540        /*
 541         * Resorting requires the cfqq to be on the RR list already.
 542         */
 543        if (cfq_cfqq_on_rr(cfqq))
 544                cfq_service_tree_add(cfqd, cfqq, 0);
 545}
 546
 547/*
 548 * add to busy list of queues for service, trying to be fair in ordering
 549 * the pending list according to last request service
 550 */
 551static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 552{
 553        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
 554        BUG_ON(cfq_cfqq_on_rr(cfqq));
 555        cfq_mark_cfqq_on_rr(cfqq);
 556        cfqd->busy_queues++;
 557
 558        cfq_resort_rr_list(cfqd, cfqq);
 559}
 560
 561/*
 562 * Called when the cfqq no longer has requests pending, remove it from
 563 * the service tree.
 564 */
 565static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 566{
 567        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
 568        BUG_ON(!cfq_cfqq_on_rr(cfqq));
 569        cfq_clear_cfqq_on_rr(cfqq);
 570
 571        if (!RB_EMPTY_NODE(&cfqq->rb_node))
 572                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 573
 574        BUG_ON(!cfqd->busy_queues);
 575        cfqd->busy_queues--;
 576}
 577
 578/*
 579 * rb tree support functions
 580 */
 581static void cfq_del_rq_rb(struct request *rq)
 582{
 583        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 584        struct cfq_data *cfqd = cfqq->cfqd;
 585        const int sync = rq_is_sync(rq);
 586
 587        BUG_ON(!cfqq->queued[sync]);
 588        cfqq->queued[sync]--;
 589
 590        elv_rb_del(&cfqq->sort_list, rq);
 591
 592        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 593                cfq_del_cfqq_rr(cfqd, cfqq);
 594}
 595
 596static void cfq_add_rq_rb(struct request *rq)
 597{
 598        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 599        struct cfq_data *cfqd = cfqq->cfqd;
 600        struct request *__alias;
 601
 602        cfqq->queued[rq_is_sync(rq)]++;
 603
 604        /*
 605         * looks a little odd, but the first insert might return an alias.
 606         * if that happens, put the alias on the dispatch list
 607         */
 608        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
 609                cfq_dispatch_insert(cfqd->queue, __alias);
 610
 611        if (!cfq_cfqq_on_rr(cfqq))
 612                cfq_add_cfqq_rr(cfqd, cfqq);
 613
 614        /*
 615         * check if this request is a better next-serve candidate
 616         */
 617        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
 618        BUG_ON(!cfqq->next_rq);
 619}
 620
 621static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 622{
 623        elv_rb_del(&cfqq->sort_list, rq);
 624        cfqq->queued[rq_is_sync(rq)]--;
 625        cfq_add_rq_rb(rq);
 626}
 627
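     /*
      * Find a request already queued in the task's cfqq that starts right
      * where this bio ends, i.e. a front merge candidate.
      */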
 628static struct request *
 629cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 630{
 631        struct task_struct *tsk = current;
 632        struct cfq_io_context *cic;
 633        struct cfq_queue *cfqq;
 634
 635        cic = cfq_cic_lookup(cfqd, tsk->io_context);
 636        if (!cic)
 637                return NULL;
 638
 639        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 640        if (cfqq) {
 641                sector_t sector = bio->bi_sector + bio_sectors(bio);
 642
 643                return elv_rb_find(&cfqq->sort_list, sector);
 644        }
 645
 646        return NULL;
 647}
 648
 649static void cfq_activate_request(struct request_queue *q, struct request *rq)
 650{
 651        struct cfq_data *cfqd = q->elevator->elevator_data;
 652
 653        cfqd->rq_in_driver++;
 654        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 655                                                cfqd->rq_in_driver);
 656
 657        /*
  658         * If the depth is larger than 1, it really could be queueing. But let's
 659         * make the mark a little higher - idling could still be good for
 660         * low queueing, and a low queueing number could also just indicate
 661         * a SCSI mid layer like behaviour where limit+1 is often seen.
 662         */
 663        if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
 664                cfqd->hw_tag = 1;
 665
 666        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 667}
 668
 669static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 670{
 671        struct cfq_data *cfqd = q->elevator->elevator_data;
 672
 673        WARN_ON(!cfqd->rq_in_driver);
 674        cfqd->rq_in_driver--;
 675        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
 676                                                cfqd->rq_in_driver);
 677}
 678
 679static void cfq_remove_request(struct request *rq)
 680{
 681        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 682
 683        if (cfqq->next_rq == rq)
 684                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
 685
 686        list_del_init(&rq->queuelist);
 687        cfq_del_rq_rb(rq);
 688
 689        if (rq_is_meta(rq)) {
 690                WARN_ON(!cfqq->meta_pending);
 691                cfqq->meta_pending--;
 692        }
 693}
 694
 695static int cfq_merge(struct request_queue *q, struct request **req,
 696                     struct bio *bio)
 697{
 698        struct cfq_data *cfqd = q->elevator->elevator_data;
 699        struct request *__rq;
 700
 701        __rq = cfq_find_rq_fmerge(cfqd, bio);
 702        if (__rq && elv_rq_merge_ok(__rq, bio)) {
 703                *req = __rq;
 704                return ELEVATOR_FRONT_MERGE;
 705        }
 706
 707        return ELEVATOR_NO_MERGE;
 708}
 709
 710static void cfq_merged_request(struct request_queue *q, struct request *req,
 711                               int type)
 712{
 713        if (type == ELEVATOR_FRONT_MERGE) {
 714                struct cfq_queue *cfqq = RQ_CFQQ(req);
 715
 716                cfq_reposition_rq_rb(cfqq, req);
 717        }
 718}
 719
 720static void
 721cfq_merged_requests(struct request_queue *q, struct request *rq,
 722                    struct request *next)
 723{
 724        /*
 725         * reposition in fifo if next is older than rq
 726         */
 727        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
 728            time_before(next->start_time, rq->start_time))
 729                list_move(&rq->queuelist, &next->queuelist);
 730
 731        cfq_remove_request(next);
 732}
 733
 734static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 735                           struct bio *bio)
 736{
 737        struct cfq_data *cfqd = q->elevator->elevator_data;
 738        struct cfq_io_context *cic;
 739        struct cfq_queue *cfqq;
 740
 741        /*
 742         * Disallow merge of a sync bio into an async request.
 743         */
 744        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 745                return 0;
 746
 747        /*
 748         * Lookup the cfqq that this bio will be queued with. Allow
 749         * merge only if rq is queued there.
 750         */
 751        cic = cfq_cic_lookup(cfqd, current->io_context);
 752        if (!cic)
 753                return 0;
 754
 755        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 756        if (cfqq == RQ_CFQQ(rq))
 757                return 1;
 758
 759        return 0;
 760}
 761
 762static void __cfq_set_active_queue(struct cfq_data *cfqd,
 763                                   struct cfq_queue *cfqq)
 764{
 765        if (cfqq) {
 766                cfq_log_cfqq(cfqd, cfqq, "set_active");
 767                cfqq->slice_end = 0;
 768                cfq_clear_cfqq_must_alloc_slice(cfqq);
 769                cfq_clear_cfqq_fifo_expire(cfqq);
 770                cfq_mark_cfqq_slice_new(cfqq);
 771                cfq_clear_cfqq_queue_new(cfqq);
 772        }
 773
 774        cfqd->active_queue = cfqq;
 775}
 776
 777/*
 778 * current cfqq expired its slice (or was too idle), select new one
 779 */
 780static void
 781__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 782                    int timed_out)
 783{
 784        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 785
 786        if (cfq_cfqq_wait_request(cfqq))
 787                del_timer(&cfqd->idle_slice_timer);
 788
 789        cfq_clear_cfqq_must_dispatch(cfqq);
 790        cfq_clear_cfqq_wait_request(cfqq);
 791
 792        /*
 793         * store what was left of this slice, if the queue idled/timed out
 794         */
 795        if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
 796                cfqq->slice_resid = cfqq->slice_end - jiffies;
 797                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 798        }
 799
 800        cfq_resort_rr_list(cfqd, cfqq);
 801
 802        if (cfqq == cfqd->active_queue)
 803                cfqd->active_queue = NULL;
 804
 805        if (cfqd->active_cic) {
 806                put_io_context(cfqd->active_cic->ioc);
 807                cfqd->active_cic = NULL;
 808        }
 809}
 810
 811static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 812{
 813        struct cfq_queue *cfqq = cfqd->active_queue;
 814
 815        if (cfqq)
 816                __cfq_slice_expired(cfqd, cfqq, timed_out);
 817}
 818
 819/*
 820 * Get next queue for service. Unless we have a queue preemption,
 821 * we'll simply select the first cfqq in the service tree.
 822 */
 823static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 824{
 825        if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
 826                return NULL;
 827
 828        return cfq_rb_first(&cfqd->service_tree);
 829}
 830
 831/*
 832 * Get and set a new active queue for service.
 833 */
 834static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 835{
 836        struct cfq_queue *cfqq;
 837
 838        cfqq = cfq_get_next_queue(cfqd);
 839        __cfq_set_active_queue(cfqd, cfqq);
 840        return cfqq;
 841}
 842
 843static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 844                                          struct request *rq)
 845{
 846        if (rq->sector >= cfqd->last_position)
 847                return rq->sector - cfqd->last_position;
 848        else
 849                return cfqd->last_position - rq->sector;
 850}
 851
 852static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 853{
 854        struct cfq_io_context *cic = cfqd->active_cic;
 855
 856        if (!sample_valid(cic->seek_samples))
 857                return 0;
 858
 859        return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
 860}
 861
 862static int cfq_close_cooperator(struct cfq_data *cfq_data,
 863                                struct cfq_queue *cfqq)
 864{
 865        /*
  866         * We should notice if some of the queues are cooperating, e.g.
  867         * working closely on the same area of the disk. In that case,
  868         * we can group them together and not waste time idling.
 869         */
 870        return 0;
 871}
 872
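     /*
      * A context whose mean seek distance exceeds this threshold is
      * considered seeky; we keep idle waits short for such processes.
      */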
 873#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
 874
 875static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 876{
 877        struct cfq_queue *cfqq = cfqd->active_queue;
 878        struct cfq_io_context *cic;
 879        unsigned long sl;
 880
 881        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 882        WARN_ON(cfq_cfqq_slice_new(cfqq));
 883
 884        /*
 885         * idle is disabled, either manually or by past process history
 886         */
 887        if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
 888                return;
 889
 890        /*
 891         * still requests with the driver, don't idle
 892         */
 893        if (cfqd->rq_in_driver)
 894                return;
 895
 896        /*
 897         * task has exited, don't wait
 898         */
 899        cic = cfqd->active_cic;
 900        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 901                return;
 902
 903        /*
 904         * See if this prio level has a good candidate
 905         */
 906        if (cfq_close_cooperator(cfqd, cfqq) &&
 907            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
 908                return;
 909
 910        cfq_mark_cfqq_must_dispatch(cfqq);
 911        cfq_mark_cfqq_wait_request(cfqq);
 912
 913        /*
 914         * we don't want to idle for seeks, but we do want to allow
 915         * fair distribution of slice time for a process doing back-to-back
  916         * seeks. So allow a little bit of time for it to submit a new rq.
 917         */
 918        sl = cfqd->cfq_slice_idle;
 919        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
 920                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 921
 922        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 923        cfq_log(cfqd, "arm_idle: %lu", sl);
 924}
 925
 926/*
 927 * Move request from internal lists to the request queue dispatch list.
 928 */
 929static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 930{
 931        struct cfq_data *cfqd = q->elevator->elevator_data;
 932        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 933
 934        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
 935
 936        cfq_remove_request(rq);
 937        cfqq->dispatched++;
 938        elv_dispatch_sort(q, rq);
 939
 940        if (cfq_cfqq_sync(cfqq))
 941                cfqd->sync_flight++;
 942}
 943
 944/*
 945 * return expired entry, or NULL to just start from scratch in rbtree
 946 */
 947static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 948{
 949        struct cfq_data *cfqd = cfqq->cfqd;
 950        struct request *rq;
 951        int fifo;
 952
 953        if (cfq_cfqq_fifo_expire(cfqq))
 954                return NULL;
 955
 956        cfq_mark_cfqq_fifo_expire(cfqq);
 957
 958        if (list_empty(&cfqq->fifo))
 959                return NULL;
 960
 961        fifo = cfq_cfqq_sync(cfqq);
 962        rq = rq_entry_fifo(cfqq->fifo.next);
 963
 964        if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
 965                rq = NULL;
 966
 967        cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
 968        return rq;
 969}
 970
 971static inline int
 972cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 973{
 974        const int base_rq = cfqd->cfq_slice_async_rq;
 975
 976        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
 977
 978        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
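             /*
              * This works out to 2 * base_rq * (8 - ioprio), so higher
              * priority (lower ioprio) async queues may dispatch more
              * requests per slice.
              */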
 979}
 980
 981/*
 982 * Select a queue for service. If we have a current active queue,
 983 * check whether to continue servicing it, or retrieve and set a new one.
 984 */
 985static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 986{
 987        struct cfq_queue *cfqq;
 988
 989        cfqq = cfqd->active_queue;
 990        if (!cfqq)
 991                goto new_queue;
 992
 993        /*
 994         * The active queue has run out of time, expire it and select new.
 995         */
 996        if (cfq_slice_used(cfqq))
 997                goto expire;
 998
 999        /*
1000         * The active queue has requests and isn't expired, allow it to
1001         * dispatch.
1002         */
1003        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1004                goto keep_queue;
1005
1006        /*
1007         * No requests pending. If the active queue still has requests in
1008         * flight or is idling for a new request, allow either of these
1009         * conditions to happen (or time out) before selecting a new queue.
1010         */
1011        if (timer_pending(&cfqd->idle_slice_timer) ||
1012            (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
1013                cfqq = NULL;
1014                goto keep_queue;
1015        }
1016
1017expire:
1018        cfq_slice_expired(cfqd, 0);
1019new_queue:
1020        cfqq = cfq_set_active_queue(cfqd);
1021keep_queue:
1022        return cfqq;
1023}
1024
1025/*
1026 * Dispatch some requests from cfqq, moving them to the request queue
1027 * dispatch list.
1028 */
1029static int
1030__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1031                        int max_dispatch)
1032{
1033        int dispatched = 0;
1034
1035        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1036
1037        do {
1038                struct request *rq;
1039
1040                /*
1041                 * follow expired path, else get first next available
1042                 */
1043                rq = cfq_check_fifo(cfqq);
1044                if (rq == NULL)
1045                        rq = cfqq->next_rq;
1046
1047                /*
1048                 * finally, insert request into driver dispatch list
1049                 */
1050                cfq_dispatch_insert(cfqd->queue, rq);
1051
1052                dispatched++;
1053
1054                if (!cfqd->active_cic) {
1055                        atomic_inc(&RQ_CIC(rq)->ioc->refcount);
1056                        cfqd->active_cic = RQ_CIC(rq);
1057                }
1058
1059                if (RB_EMPTY_ROOT(&cfqq->sort_list))
1060                        break;
1061
1062        } while (dispatched < max_dispatch);
1063
1064        /*
 1065         * expire an async queue immediately if it has used up its slice. Idle
 1066         * queues always expire after 1 dispatch round.
1067         */
1068        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1069            dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1070            cfq_class_idle(cfqq))) {
1071                cfqq->slice_end = jiffies + 1;
1072                cfq_slice_expired(cfqd, 0);
1073        }
1074
1075        return dispatched;
1076}
1077
1078static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1079{
1080        int dispatched = 0;
1081
1082        while (cfqq->next_rq) {
1083                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1084                dispatched++;
1085        }
1086
1087        BUG_ON(!list_empty(&cfqq->fifo));
1088        return dispatched;
1089}
1090
1091/*
1092 * Drain our current requests. Used for barriers and when switching
1093 * io schedulers on-the-fly.
1094 */
1095static int cfq_forced_dispatch(struct cfq_data *cfqd)
1096{
1097        struct cfq_queue *cfqq;
1098        int dispatched = 0;
1099
1100        while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
1101                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1102
1103        cfq_slice_expired(cfqd, 0);
1104
1105        BUG_ON(cfqd->busy_queues);
1106
1107        cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
1108        return dispatched;
1109}
1110
1111static int cfq_dispatch_requests(struct request_queue *q, int force)
1112{
1113        struct cfq_data *cfqd = q->elevator->elevator_data;
1114        struct cfq_queue *cfqq;
1115        int dispatched;
1116
1117        if (!cfqd->busy_queues)
1118                return 0;
1119
1120        if (unlikely(force))
1121                return cfq_forced_dispatch(cfqd);
1122
1123        dispatched = 0;
1124        while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1125                int max_dispatch;
1126
1127                max_dispatch = cfqd->cfq_quantum;
1128                if (cfq_class_idle(cfqq))
1129                        max_dispatch = 1;
1130
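                     /*
                      * If this queue already has a full quantum in flight,
                      * only keep feeding it when it is the sole busy queue,
                      * and even then never beyond four times the quantum.
                      */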
1131                if (cfqq->dispatched >= max_dispatch) {
1132                        if (cfqd->busy_queues > 1)
1133                                break;
1134                        if (cfqq->dispatched >= 4 * max_dispatch)
1135                                break;
1136                }
1137
1138                if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1139                        break;
1140
1141                cfq_clear_cfqq_must_dispatch(cfqq);
1142                cfq_clear_cfqq_wait_request(cfqq);
1143                del_timer(&cfqd->idle_slice_timer);
1144
1145                dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1146        }
1147
1148        cfq_log(cfqd, "dispatched=%d", dispatched);
1149        return dispatched;
1150}
1151
1152/*
1153 * task holds one reference to the queue, dropped when task exits. each rq
1154 * in-flight on this queue also holds a reference, dropped when rq is freed.
1155 *
1156 * queue lock must be held here.
1157 */
1158static void cfq_put_queue(struct cfq_queue *cfqq)
1159{
1160        struct cfq_data *cfqd = cfqq->cfqd;
1161
1162        BUG_ON(atomic_read(&cfqq->ref) <= 0);
1163
1164        if (!atomic_dec_and_test(&cfqq->ref))
1165                return;
1166
1167        cfq_log_cfqq(cfqd, cfqq, "put_queue");
1168        BUG_ON(rb_first(&cfqq->sort_list));
1169        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1170        BUG_ON(cfq_cfqq_on_rr(cfqq));
1171
1172        if (unlikely(cfqd->active_queue == cfqq)) {
1173                __cfq_slice_expired(cfqd, cfqq, 0);
1174                cfq_schedule_dispatch(cfqd);
1175        }
1176
1177        kmem_cache_free(cfq_pool, cfqq);
1178}
1179
1180/*
1181 * Must always be called with the rcu_read_lock() held
1182 */
1183static void
1184__call_for_each_cic(struct io_context *ioc,
1185                    void (*func)(struct io_context *, struct cfq_io_context *))
1186{
1187        struct cfq_io_context *cic;
1188        struct hlist_node *n;
1189
1190        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1191                func(ioc, cic);
1192}
1193
1194/*
1195 * Call func for each cic attached to this ioc.
1196 */
1197static void
1198call_for_each_cic(struct io_context *ioc,
1199                  void (*func)(struct io_context *, struct cfq_io_context *))
1200{
1201        rcu_read_lock();
1202        __call_for_each_cic(ioc, func);
1203        rcu_read_unlock();
1204}
1205
1206static void cfq_cic_free_rcu(struct rcu_head *head)
1207{
1208        struct cfq_io_context *cic;
1209
1210        cic = container_of(head, struct cfq_io_context, rcu_head);
1211
1212        kmem_cache_free(cfq_ioc_pool, cic);
1213        elv_ioc_count_dec(ioc_count);
1214
1215        if (ioc_gone) {
1216                /*
1217                 * CFQ scheduler is exiting, grab exit lock and check
1218                 * the pending io context count. If it hits zero,
1219                 * complete ioc_gone and set it back to NULL
1220                 */
1221                spin_lock(&ioc_gone_lock);
1222                if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
1223                        complete(ioc_gone);
1224                        ioc_gone = NULL;
1225                }
1226                spin_unlock(&ioc_gone_lock);
1227        }
1228}
1229
1230static void cfq_cic_free(struct cfq_io_context *cic)
1231{
1232        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1233}
1234
1235static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1236{
1237        unsigned long flags;
1238
1239        BUG_ON(!cic->dead_key);
1240
1241        spin_lock_irqsave(&ioc->lock, flags);
1242        radix_tree_delete(&ioc->radix_root, cic->dead_key);
1243        hlist_del_rcu(&cic->cic_list);
1244        spin_unlock_irqrestore(&ioc->lock, flags);
1245
1246        cfq_cic_free(cic);
1247}
1248
1249/*
1250 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1251 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1252 * and ->trim() which is called with the task lock held
1253 */
1254static void cfq_free_io_context(struct io_context *ioc)
1255{
1256        /*
1257         * ioc->refcount is zero here, or we are called from elv_unregister(),
1258         * so no more cic's are allowed to be linked into this ioc.  So it
1259         * should be ok to iterate over the known list, we will see all cic's
1260         * since no new ones are added.
1261         */
1262        __call_for_each_cic(ioc, cic_free_func);
1263}
1264
1265static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1266{
1267        if (unlikely(cfqq == cfqd->active_queue)) {
1268                __cfq_slice_expired(cfqd, cfqq, 0);
1269                cfq_schedule_dispatch(cfqd);
1270        }
1271
1272        cfq_put_queue(cfqq);
1273}
1274
1275static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1276                                         struct cfq_io_context *cic)
1277{
1278        struct io_context *ioc = cic->ioc;
1279
1280        list_del_init(&cic->queue_list);
1281
1282        /*
1283         * Make sure key == NULL is seen for dead queues
1284         */
1285        smp_wmb();
1286        cic->dead_key = (unsigned long) cic->key;
1287        cic->key = NULL;
1288
1289        rcu_read_lock();
1290        if (rcu_dereference(ioc->ioc_data) == cic) {
1291                rcu_read_unlock();
1292                spin_lock(&ioc->lock);
1293                rcu_assign_pointer(ioc->ioc_data, NULL);
1294                spin_unlock(&ioc->lock);
1295        } else
1296                rcu_read_unlock();
1297
1298        if (cic->cfqq[ASYNC]) {
1299                cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1300                cic->cfqq[ASYNC] = NULL;
1301        }
1302
1303        if (cic->cfqq[SYNC]) {
1304                cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1305                cic->cfqq[SYNC] = NULL;
1306        }
1307}
1308
1309static void cfq_exit_single_io_context(struct io_context *ioc,
1310                                       struct cfq_io_context *cic)
1311{
1312        struct cfq_data *cfqd = cic->key;
1313
1314        if (cfqd) {
1315                struct request_queue *q = cfqd->queue;
1316                unsigned long flags;
1317
1318                spin_lock_irqsave(q->queue_lock, flags);
1319                __cfq_exit_single_io_context(cfqd, cic);
1320                spin_unlock_irqrestore(q->queue_lock, flags);
1321        }
1322}
1323
1324/*
 1325 * The process that ioc belongs to has exited; we need to clean up
 1326 * and put the internal structures we have that belong to that process.
1327 */
1328static void cfq_exit_io_context(struct io_context *ioc)
1329{
1330        call_for_each_cic(ioc, cfq_exit_single_io_context);
1331}
1332
1333static struct cfq_io_context *
1334cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1335{
1336        struct cfq_io_context *cic;
1337
1338        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1339                                                        cfqd->queue->node);
1340        if (cic) {
1341                cic->last_end_request = jiffies;
1342                INIT_LIST_HEAD(&cic->queue_list);
1343                INIT_HLIST_NODE(&cic->cic_list);
1344                cic->dtor = cfq_free_io_context;
1345                cic->exit = cfq_exit_io_context;
1346                elv_ioc_count_inc(ioc_count);
1347        }
1348
1349        return cic;
1350}
1351
1352static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1353{
1354        struct task_struct *tsk = current;
1355        int ioprio_class;
1356
1357        if (!cfq_cfqq_prio_changed(cfqq))
1358                return;
1359
1360        ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1361        switch (ioprio_class) {
1362        default:
1363                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1364        case IOPRIO_CLASS_NONE:
1365                /*
1366                 * no prio set, inherit CPU scheduling settings
1367                 */
1368                cfqq->ioprio = task_nice_ioprio(tsk);
1369                cfqq->ioprio_class = task_nice_ioclass(tsk);
1370                break;
1371        case IOPRIO_CLASS_RT:
1372                cfqq->ioprio = task_ioprio(ioc);
1373                cfqq->ioprio_class = IOPRIO_CLASS_RT;
1374                break;
1375        case IOPRIO_CLASS_BE:
1376                cfqq->ioprio = task_ioprio(ioc);
1377                cfqq->ioprio_class = IOPRIO_CLASS_BE;
1378                break;
1379        case IOPRIO_CLASS_IDLE:
1380                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1381                cfqq->ioprio = 7;
1382                cfq_clear_cfqq_idle_window(cfqq);
1383                break;
1384        }
1385
1386        /*
1387         * keep track of original prio settings in case we have to temporarily
1388         * elevate the priority of this queue
1389         */
1390        cfqq->org_ioprio = cfqq->ioprio;
1391        cfqq->org_ioprio_class = cfqq->ioprio_class;
1392        cfq_clear_cfqq_prio_changed(cfqq);
1393}
1394
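     /*
      * The owning task's io priority changed: swap the async queue for one
      * allocated at the new priority, and flag the sync queue so its priority
      * data is re-initialized on next use.
      */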
1395static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1396{
1397        struct cfq_data *cfqd = cic->key;
1398        struct cfq_queue *cfqq;
1399        unsigned long flags;
1400
1401        if (unlikely(!cfqd))
1402                return;
1403
1404        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1405
1406        cfqq = cic->cfqq[ASYNC];
1407        if (cfqq) {
1408                struct cfq_queue *new_cfqq;
1409                new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
1410                if (new_cfqq) {
1411                        cic->cfqq[ASYNC] = new_cfqq;
1412                        cfq_put_queue(cfqq);
1413                }
1414        }
1415
1416        cfqq = cic->cfqq[SYNC];
1417        if (cfqq)
1418                cfq_mark_cfqq_prio_changed(cfqq);
1419
1420        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1421}
1422
1423static void cfq_ioc_set_ioprio(struct io_context *ioc)
1424{
1425        call_for_each_cic(ioc, changed_ioprio);
1426        ioc->ioprio_changed = 0;
1427}
1428
1429static struct cfq_queue *
1430cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
1431                     struct io_context *ioc, gfp_t gfp_mask)
1432{
1433        struct cfq_queue *cfqq, *new_cfqq = NULL;
1434        struct cfq_io_context *cic;
1435
1436retry:
1437        cic = cfq_cic_lookup(cfqd, ioc);
1438        /* cic always exists here */
1439        cfqq = cic_to_cfqq(cic, is_sync);
1440
1441        if (!cfqq) {
1442                if (new_cfqq) {
1443                        cfqq = new_cfqq;
1444                        new_cfqq = NULL;
1445                } else if (gfp_mask & __GFP_WAIT) {
1446                        /*
1447                         * Inform the allocator of the fact that we will
1448                         * just repeat this allocation if it fails, to allow
1449                         * the allocator to do whatever it needs to attempt to
1450                         * free memory.
1451                         */
1452                        spin_unlock_irq(cfqd->queue->queue_lock);
1453                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
1454                                        gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
1455                                        cfqd->queue->node);
1456                        spin_lock_irq(cfqd->queue->queue_lock);
1457                        goto retry;
1458                } else {
1459                        cfqq = kmem_cache_alloc_node(cfq_pool,
1460                                        gfp_mask | __GFP_ZERO,
1461                                        cfqd->queue->node);
1462                        if (!cfqq)
1463                                goto out;
1464                }
1465
1466                RB_CLEAR_NODE(&cfqq->rb_node);
1467                INIT_LIST_HEAD(&cfqq->fifo);
1468
1469                atomic_set(&cfqq->ref, 0);
1470                cfqq->cfqd = cfqd;
1471
1472                cfq_mark_cfqq_prio_changed(cfqq);
1473                cfq_mark_cfqq_queue_new(cfqq);
1474
1475                cfq_init_prio_data(cfqq, ioc);
1476
1477                if (is_sync) {
1478                        if (!cfq_class_idle(cfqq))
1479                                cfq_mark_cfqq_idle_window(cfqq);
1480                        cfq_mark_cfqq_sync(cfqq);
1481                }
1482                cfqq->pid = current->pid;
1483                cfq_log_cfqq(cfqd, cfqq, "alloced");
1484        }
1485
1486        if (new_cfqq)
1487                kmem_cache_free(cfq_pool, new_cfqq);
1488
1489out:
1490        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1491        return cfqq;
1492}
1493
1494static struct cfq_queue **
1495cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1496{
1497        switch (ioprio_class) {
1498        case IOPRIO_CLASS_RT:
1499                return &cfqd->async_cfqq[0][ioprio];
1500        case IOPRIO_CLASS_BE:
1501                return &cfqd->async_cfqq[1][ioprio];
1502        case IOPRIO_CLASS_IDLE:
1503                return &cfqd->async_idle_cfqq;
1504        default:
1505                BUG();
1506        }
1507}
1508
1509static struct cfq_queue *
1510cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
1511              gfp_t gfp_mask)
1512{
1513        const int ioprio = task_ioprio(ioc);
1514        const int ioprio_class = task_ioprio_class(ioc);
1515        struct cfq_queue **async_cfqq = NULL;
1516        struct cfq_queue *cfqq = NULL;
1517
1518        if (!is_sync) {
1519                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1520                cfqq = *async_cfqq;
1521        }
1522
1523        if (!cfqq) {
1524                cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
1525                if (!cfqq)
1526                        return NULL;
1527        }
1528
1529        /*
1530         * pin the queue now that it's allocated, scheduler exit will prune it
1531         */
1532        if (!is_sync && !(*async_cfqq)) {
1533                atomic_inc(&cfqq->ref);
1534                *async_cfqq = cfqq;
1535        }
1536
1537        atomic_inc(&cfqq->ref);
1538        return cfqq;
1539}
1540
1541/*
1542 * We drop cfq io contexts lazily, so we may find a dead one.
1543 */
1544static void
1545cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1546                  struct cfq_io_context *cic)
1547{
1548        unsigned long flags;
1549
1550        WARN_ON(!list_empty(&cic->queue_list));
1551
1552        spin_lock_irqsave(&ioc->lock, flags);
1553
1554        BUG_ON(ioc->ioc_data == cic);
1555
1556        radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
1557        hlist_del_rcu(&cic->cic_list);
1558        spin_unlock_irqrestore(&ioc->lock, flags);
1559
1560        cfq_cic_free(cic);
1561}
1562
1563static struct cfq_io_context *
1564cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1565{
1566        struct cfq_io_context *cic;
1567        unsigned long flags;
1568        void *k;
1569
1570        if (unlikely(!ioc))
1571                return NULL;
1572
1573        rcu_read_lock();
1574
1575        /*
1576         * we maintain a last-hit cache, to avoid browsing over the tree
1577         */
1578        cic = rcu_dereference(ioc->ioc_data);
1579        if (cic && cic->key == cfqd) {
1580                rcu_read_unlock();
1581                return cic;
1582        }
1583
1584        do {
1585                cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1586                rcu_read_unlock();
1587                if (!cic)
1588                        break;
1589                /* ->key must be copied to avoid race with cfq_exit_queue() */
1590                k = cic->key;
1591                if (unlikely(!k)) {
1592                        cfq_drop_dead_cic(cfqd, ioc, cic);
1593                        rcu_read_lock();
1594                        continue;
1595                }
1596
1597                spin_lock_irqsave(&ioc->lock, flags);
1598                rcu_assign_pointer(ioc->ioc_data, cic);
1599                spin_unlock_irqrestore(&ioc->lock, flags);
1600                break;
1601        } while (1);
1602
1603        return cic;
1604}
1605
1606/*
 1607 * Add cic into ioc, using cfqd as the search key. This enables us to look up
 1608 * the process-specific cfq io context when entered from the block layer.
1609 * Also adds the cic to a per-cfqd list, used when this queue is removed.
1610 */
1611static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1612                        struct cfq_io_context *cic, gfp_t gfp_mask)
1613{
1614        unsigned long flags;
1615        int ret;
1616
1617        ret = radix_tree_preload(gfp_mask);
1618        if (!ret) {
1619                cic->ioc = ioc;
1620                cic->key = cfqd;
1621
1622                spin_lock_irqsave(&ioc->lock, flags);
1623                ret = radix_tree_insert(&ioc->radix_root,
1624                                                (unsigned long) cfqd, cic);
1625                if (!ret)
1626                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
1627                spin_unlock_irqrestore(&ioc->lock, flags);
1628
1629                radix_tree_preload_end();
1630
1631                if (!ret) {
1632                        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1633                        list_add(&cic->queue_list, &cfqd->cic_list);
1634                        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1635                }
1636        }
1637
1638        if (ret)
1639                printk(KERN_ERR "cfq: cic link failed!\n");
1640
1641        return ret;
1642}
1643
1644/*
 1645 * Set up the general io context and cfq io context. There can be several cfq
1646 * io contexts per general io context, if this process is doing io to more
1647 * than one device managed by cfq.
1648 */
1649static struct cfq_io_context *
1650cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1651{
1652        struct io_context *ioc = NULL;
1653        struct cfq_io_context *cic;
1654
1655        might_sleep_if(gfp_mask & __GFP_WAIT);
1656
1657        ioc = get_io_context(gfp_mask, cfqd->queue->node);
1658        if (!ioc)
1659                return NULL;
1660
1661        cic = cfq_cic_lookup(cfqd, ioc);
1662        if (cic)
1663                goto out;
1664
1665        cic = cfq_alloc_io_context(cfqd, gfp_mask);
1666        if (cic == NULL)
1667                goto err;
1668
1669        if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1670                goto err_free;
1671
1672out:
1673        smp_read_barrier_depends();
1674        if (unlikely(ioc->ioprio_changed))
1675                cfq_ioc_set_ioprio(ioc);
1676
1677        return cic;
1678err_free:
1679        cfq_cic_free(cic);
1680err:
1681        put_io_context(ioc);
1682        return NULL;
1683}
1684
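    /*
     * Update the decaying thinktime stats for this process: the elapsed
     * time since its last request completed (clamped to twice
     * cfq_slice_idle) is folded in with the old value weighted 7/8 and
     * scaled by 256 for fixed-point precision. With a steady gap of T
     * jiffies between requests, ttime_mean converges towards T.
     */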
1685static void
1686cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1687{
1688        unsigned long elapsed = jiffies - cic->last_end_request;
1689        unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1690
1691        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1692        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1693        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1694}
1695
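    /*
     * Maintain a decaying average of the seek distance between this
     * process' consecutive requests, using the same 7/8-weighted
     * fixed-point scheme as the thinktime stats. seek_mean is later
     * consulted (via CIC_SEEKY()) when deciding whether idling on this
     * process is worthwhile.
     */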
1696static void
1697cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1698                       struct request *rq)
1699{
1700        sector_t sdist;
1701        u64 total;
1702
1703        if (cic->last_request_pos < rq->sector)
1704                sdist = rq->sector - cic->last_request_pos;
1705        else
1706                sdist = cic->last_request_pos - rq->sector;
1707
1708        /*
1709         * Don't allow the seek distance to get too large from the
1710         * odd fragment, pagein, etc
1711         */
1712        if (cic->seek_samples <= 60) /* second&third seek */
1713                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1714        else
1715                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1716
1717        cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1718        cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1719        total = cic->seek_total + (cic->seek_samples/2);
1720        do_div(total, cic->seek_samples);
1721        cic->seek_mean = (sector_t)total;
1722}
1723
1724/*
1725 * Disable idle window if the process thinks too long or seeks so much that
1726 * it doesn't matter
1727 */
1728static void
1729cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1730                       struct cfq_io_context *cic)
1731{
1732        int old_idle, enable_idle;
1733
1734        /*
1735         * Don't idle for async or idle io prio class
1736         */
1737        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1738                return;
1739
1740        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1741
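            /*
             * Don't idle if the task has already exited, if idling is
             * disabled, or if the process is seeky on a command queueing
             * drive, where waiting on it is unlikely to pay off.
             */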
1742        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
1743            (cfqd->hw_tag && CIC_SEEKY(cic)))
1744                enable_idle = 0;
1745        else if (sample_valid(cic->ttime_samples)) {
1746                if (cic->ttime_mean > cfqd->cfq_slice_idle)
1747                        enable_idle = 0;
1748                else
1749                        enable_idle = 1;
1750        }
1751
1752        if (old_idle != enable_idle) {
1753                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
1754                if (enable_idle)
1755                        cfq_mark_cfqq_idle_window(cfqq);
1756                else
1757                        cfq_clear_cfqq_idle_window(cfqq);
1758        }
1759}
1760
1761/*
1762 * Check if new_cfqq should preempt the currently active queue. Return 0 for
1763 * no, or if we aren't sure; a return of 1 will cause a preempt.
1764 */
1765static int
1766cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1767                   struct request *rq)
1768{
1769        struct cfq_queue *cfqq;
1770
1771        cfqq = cfqd->active_queue;
1772        if (!cfqq)
1773                return 0;
1774
1775        if (cfq_slice_used(cfqq))
1776                return 1;
1777
1778        if (cfq_class_idle(new_cfqq))
1779                return 0;
1780
1781        if (cfq_class_idle(cfqq))
1782                return 1;
1783
1784        /*
1785         * if the new request is sync, but the currently running queue is
1786         * not, let the sync request have priority.
1787         */
1788        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1789                return 1;
1790
1791        /*
1792         * So both queues are sync. Let the new request get disk time if
1793         * it's a metadata request and the current queue is doing regular IO.
1794         */
1795        if (rq_is_meta(rq) && !cfqq->meta_pending)
1796                return 1;
1797
1798        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1799                return 0;
1800
1801        /*
1802         * if this request is as good as one we would expect from the
1803         * current cfqq, let it preempt
1804         */
1805        if (cfq_rq_close(cfqd, rq))
1806                return 1;
1807
1808        return 0;
1809}
1810
1811/*
1812 * cfqq preempts the active queue. If we allowed preempt with no slice left,
1813 * let it have half of its nominal slice.
1814 */
1815static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1816{
1817        cfq_log_cfqq(cfqd, cfqq, "preempt");
1818        cfq_slice_expired(cfqd, 1);
1819
1820        /*
1821         * Put the new queue at the front of the current list,
1822         * so we know that it will be selected next.
1823         */
1824        BUG_ON(!cfq_cfqq_on_rr(cfqq));
1825
1826        cfq_service_tree_add(cfqd, cfqq, 1);
1827
1828        cfqq->slice_end = 0;
1829        cfq_mark_cfqq_slice_new(cfqq);
1830}
1831
1832/*
1833 * Called when a new fs request (rq) is added (to cfqq). Check if there's
1834 * something we should do about it
1835 */
1836static void
1837cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1838                struct request *rq)
1839{
1840        struct cfq_io_context *cic = RQ_CIC(rq);
1841
1842        if (rq_is_meta(rq))
1843                cfqq->meta_pending++;
1844
1845        cfq_update_io_thinktime(cfqd, cic);
1846        cfq_update_io_seektime(cfqd, cic, rq);
1847        cfq_update_idle_window(cfqd, cfqq, cic);
1848
1849        cic->last_request_pos = rq->sector + rq->nr_sectors;
1850
1851        if (cfqq == cfqd->active_queue) {
1852                /*
1853                 * if we are waiting for a request for this queue, let it rip
1854                 * immediately and flag that we must not expire this queue
1855                 * just now
1856                 */
1857                if (cfq_cfqq_wait_request(cfqq)) {
1858                        cfq_mark_cfqq_must_dispatch(cfqq);
1859                        del_timer(&cfqd->idle_slice_timer);
1860                        blk_start_queueing(cfqd->queue);
1861                }
1862        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1863                /*
1864                 * not the active queue - expire current slice if it is
1865                 * idle and has expired its mean thinktime or this new queue
1866                 * has some old slice time left and is of higher priority
1867                 */
1868                cfq_preempt_queue(cfqd, cfqq);
1869                cfq_mark_cfqq_must_dispatch(cfqq);
1870                blk_start_queueing(cfqd->queue);
1871        }
1872}
1873
1874static void cfq_insert_request(struct request_queue *q, struct request *rq)
1875{
1876        struct cfq_data *cfqd = q->elevator->elevator_data;
1877        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1878
1879        cfq_log_cfqq(cfqd, cfqq, "insert_request");
1880        cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1881
1882        cfq_add_rq_rb(rq);
1883
1884        list_add_tail(&rq->queuelist, &cfqq->fifo);
1885
1886        cfq_rq_enqueued(cfqd, cfqq, rq);
1887}
1888
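    /*
     * A request we own has completed: update the in-driver and dispatch
     * accounting, record the completion time for thinktime tracking, and
     * decide whether the active queue should be expired or armed for
     * idling.
     */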
1889static void cfq_completed_request(struct request_queue *q, struct request *rq)
1890{
1891        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1892        struct cfq_data *cfqd = cfqq->cfqd;
1893        const int sync = rq_is_sync(rq);
1894        unsigned long now;
1895
1896        now = jiffies;
1897        cfq_log_cfqq(cfqd, cfqq, "complete");
1898
1899        WARN_ON(!cfqd->rq_in_driver);
1900        WARN_ON(!cfqq->dispatched);
1901        cfqd->rq_in_driver--;
1902        cfqq->dispatched--;
1903
1904        if (cfq_cfqq_sync(cfqq))
1905                cfqd->sync_flight--;
1906
1907        if (!cfq_class_idle(cfqq))
1908                cfqd->last_end_request = now;
1909
1910        if (sync)
1911                RQ_CIC(rq)->last_end_request = now;
1912
1913        /*
1914         * If this is the active queue, check if it needs to be expired,
1915         * or if we want to idle in case it has no pending requests.
1916         */
1917        if (cfqd->active_queue == cfqq) {
1918                if (cfq_cfqq_slice_new(cfqq)) {
1919                        cfq_set_prio_slice(cfqd, cfqq);
1920                        cfq_clear_cfqq_slice_new(cfqq);
1921                }
1922                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
1923                        cfq_slice_expired(cfqd, 1);
1924                else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
1925                        cfq_arm_slice_timer(cfqd);
1926        }
1927
1928        if (!cfqd->rq_in_driver)
1929                cfq_schedule_dispatch(cfqd);
1930}
1931
1932/*
1933 * we temporarily boost lower priority queues if they are holding fs exclusive
1934 * resources. they are boosted to normal prio (CLASS_BE/4)
1935 */
1936static void cfq_prio_boost(struct cfq_queue *cfqq)
1937{
1938        if (has_fs_excl()) {
1939                /*
1940                 * boost idle prio on transactions that would lock out other
1941                 * users of the filesystem
1942                 */
1943                if (cfq_class_idle(cfqq))
1944                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
1945                if (cfqq->ioprio > IOPRIO_NORM)
1946                        cfqq->ioprio = IOPRIO_NORM;
1947        } else {
1948                /*
1949                 * check if we need to unboost the queue
1950                 */
1951                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1952                        cfqq->ioprio_class = cfqq->org_ioprio_class;
1953                if (cfqq->ioprio != cfqq->org_ioprio)
1954                        cfqq->ioprio = cfqq->org_ioprio;
1955        }
1956}
1957
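    /*
     * If we are idling waiting on this queue, or it was flagged must_alloc,
     * answer ELV_MQUEUE_MUST once (tracked via the must_alloc_slice flag)
     * so the request it needs can actually be allocated.
     */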
1958static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1959{
1960        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1961            !cfq_cfqq_must_alloc_slice(cfqq)) {
1962                cfq_mark_cfqq_must_alloc_slice(cfqq);
1963                return ELV_MQUEUE_MUST;
1964        }
1965
1966        return ELV_MQUEUE_MAY;
1967}
1968
1969static int cfq_may_queue(struct request_queue *q, int rw)
1970{
1971        struct cfq_data *cfqd = q->elevator->elevator_data;
1972        struct task_struct *tsk = current;
1973        struct cfq_io_context *cic;
1974        struct cfq_queue *cfqq;
1975
1976        /*
1977         * don't force setup of a queue from here, as a call to may_queue
1978         * does not necessarily imply that a request actually will be queued.
1979         * so just look up a possibly existing queue, or return 'may queue'
1980         * if that fails
1981         */
1982        cic = cfq_cic_lookup(cfqd, tsk->io_context);
1983        if (!cic)
1984                return ELV_MQUEUE_MAY;
1985
1986        cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
1987        if (cfqq) {
1988                cfq_init_prio_data(cfqq, cic->ioc);
1989                cfq_prio_boost(cfqq);
1990
1991                return __cfq_may_queue(cfqq);
1992        }
1993
1994        return ELV_MQUEUE_MAY;
1995}
1996
1997/*
1998 * queue lock held here
1999 */
2000static void cfq_put_request(struct request *rq)
2001{
2002        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2003
2004        if (cfqq) {
2005                const int rw = rq_data_dir(rq);
2006
2007                BUG_ON(!cfqq->allocated[rw]);
2008                cfqq->allocated[rw]--;
2009
2010                put_io_context(RQ_CIC(rq)->ioc);
2011
2012                rq->elevator_private = NULL;
2013                rq->elevator_private2 = NULL;
2014
2015                cfq_put_queue(cfqq);
2016        }
2017}
2018
2019/*
2020 * Allocate cfq data structures associated with this request.
2021 */
2022static int
2023cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2024{
2025        struct cfq_data *cfqd = q->elevator->elevator_data;
2026        struct cfq_io_context *cic;
2027        const int rw = rq_data_dir(rq);
2028        const int is_sync = rq_is_sync(rq);
2029        struct cfq_queue *cfqq;
2030        unsigned long flags;
2031
2032        might_sleep_if(gfp_mask & __GFP_WAIT);
2033
2034        cic = cfq_get_io_context(cfqd, gfp_mask);
2035
2036        spin_lock_irqsave(q->queue_lock, flags);
2037
2038        if (!cic)
2039                goto queue_fail;
2040
2041        cfqq = cic_to_cfqq(cic, is_sync);
2042        if (!cfqq) {
2043                cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2044
2045                if (!cfqq)
2046                        goto queue_fail;
2047
2048                cic_set_cfqq(cic, cfqq, is_sync);
2049        }
2050
2051        cfqq->allocated[rw]++;
2052        cfq_clear_cfqq_must_alloc(cfqq);
2053        atomic_inc(&cfqq->ref);
2054
2055        spin_unlock_irqrestore(q->queue_lock, flags);
2056
2057        rq->elevator_private = cic;
2058        rq->elevator_private2 = cfqq;
2059        return 0;
2060
2061queue_fail:
2062        if (cic)
2063                put_io_context(cic->ioc);
2064
2065        cfq_schedule_dispatch(cfqd);
2066        spin_unlock_irqrestore(q->queue_lock, flags);
2067        cfq_log(cfqd, "set_request fail");
2068        return 1;
2069}
2070
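    /*
     * Work handler scheduled through kblockd: restart request dispatching
     * from process context with the queue lock held.
     */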
2071static void cfq_kick_queue(struct work_struct *work)
2072{
2073        struct cfq_data *cfqd =
2074                container_of(work, struct cfq_data, unplug_work);
2075        struct request_queue *q = cfqd->queue;
2076        unsigned long flags;
2077
2078        spin_lock_irqsave(q->queue_lock, flags);
2079        blk_start_queueing(q);
2080        spin_unlock_irqrestore(q->queue_lock, flags);
2081}
2082
2083/*
2084 * Timer running if the active_queue is currently idling inside its time slice
2085 */
2086static void cfq_idle_slice_timer(unsigned long data)
2087{
2088        struct cfq_data *cfqd = (struct cfq_data *) data;
2089        struct cfq_queue *cfqq;
2090        unsigned long flags;
2091        int timed_out = 1;
2092
2093        cfq_log(cfqd, "idle timer fired");
2094
2095        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2096
2097        cfqq = cfqd->active_queue;
2098        if (cfqq) {
2099                timed_out = 0;
2100
2101                /*
2102                 * expired
2103                 */
2104                if (cfq_slice_used(cfqq))
2105                        goto expire;
2106
2107                /*
2108                 * only expire and reinvoke the request handler if there are
2109                 * other queues with pending requests
2110                 */
2111                if (!cfqd->busy_queues)
2112                        goto out_cont;
2113
2114                /*
2115                 * not expired and it has a request pending, let it dispatch
2116                 */
2117                if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
2118                        cfq_mark_cfqq_must_dispatch(cfqq);
2119                        goto out_kick;
2120                }
2121        }
2122expire:
2123        cfq_slice_expired(cfqd, timed_out);
2124out_kick:
2125        cfq_schedule_dispatch(cfqd);
2126out_cont:
2127        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2128}
2129
2130static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2131{
2132        del_timer_sync(&cfqd->idle_slice_timer);
2133        kblockd_flush_work(&cfqd->unplug_work);
2134}
2135
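    /*
     * Drop our references to the shared per-priority async queues and the
     * idle class async queue; called while tearing the elevator down.
     */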
2136static void cfq_put_async_queues(struct cfq_data *cfqd)
2137{
2138        int i;
2139
2140        for (i = 0; i < IOPRIO_BE_NR; i++) {
2141                if (cfqd->async_cfqq[0][i])
2142                        cfq_put_queue(cfqd->async_cfqq[0][i]);
2143                if (cfqd->async_cfqq[1][i])
2144                        cfq_put_queue(cfqd->async_cfqq[1][i]);
2145        }
2146
2147        if (cfqd->async_idle_cfqq)
2148                cfq_put_queue(cfqd->async_idle_cfqq);
2149}
2150
2151static void cfq_exit_queue(elevator_t *e)
2152{
2153        struct cfq_data *cfqd = e->elevator_data;
2154        struct request_queue *q = cfqd->queue;
2155
2156        cfq_shutdown_timer_wq(cfqd);
2157
2158        spin_lock_irq(q->queue_lock);
2159
2160        if (cfqd->active_queue)
2161                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2162
2163        while (!list_empty(&cfqd->cic_list)) {
2164                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2165                                                        struct cfq_io_context,
2166                                                        queue_list);
2167
2168                __cfq_exit_single_io_context(cfqd, cic);
2169        }
2170
2171        cfq_put_async_queues(cfqd);
2172
2173        spin_unlock_irq(q->queue_lock);
2174
2175        cfq_shutdown_timer_wq(cfqd);
2176
2177        kfree(cfqd);
2178}
2179
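    /*
     * Elevator init: allocate cfq_data on the queue's home node, set up
     * the idle slice timer and unplug work, and seed the per-device
     * tunables from the module-wide defaults at the top of this file.
     */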
2180static void *cfq_init_queue(struct request_queue *q)
2181{
2182        struct cfq_data *cfqd;
2183
2184        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2185        if (!cfqd)
2186                return NULL;
2187
2188        cfqd->service_tree = CFQ_RB_ROOT;
2189        INIT_LIST_HEAD(&cfqd->cic_list);
2190
2191        cfqd->queue = q;
2192
2193        init_timer(&cfqd->idle_slice_timer);
2194        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2195        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2196
2197        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2198
2199        cfqd->last_end_request = jiffies;
2200        cfqd->cfq_quantum = cfq_quantum;
2201        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2202        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2203        cfqd->cfq_back_max = cfq_back_max;
2204        cfqd->cfq_back_penalty = cfq_back_penalty;
2205        cfqd->cfq_slice[0] = cfq_slice_async;
2206        cfqd->cfq_slice[1] = cfq_slice_sync;
2207        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2208        cfqd->cfq_slice_idle = cfq_slice_idle;
2209
2210        return cfqd;
2211}
2212
2213static void cfq_slab_kill(void)
2214{
2215        /*
2216         * Caller already ensured that pending RCU callbacks are completed,
2217         * so we should have no busy allocations at this point.
2218         */
2219        if (cfq_pool)
2220                kmem_cache_destroy(cfq_pool);
2221        if (cfq_ioc_pool)
2222                kmem_cache_destroy(cfq_ioc_pool);
2223}
2224
2225static int __init cfq_slab_setup(void)
2226{
2227        cfq_pool = KMEM_CACHE(cfq_queue, 0);
2228        if (!cfq_pool)
2229                goto fail;
2230
2231        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2232        if (!cfq_ioc_pool)
2233                goto fail;
2234
2235        return 0;
2236fail:
2237        cfq_slab_kill();
2238        return -ENOMEM;
2239}
2240
2241/*
2242 * sysfs parts below -->
2243 */
2244static ssize_t
2245cfq_var_show(unsigned int var, char *page)
2246{
2247        return sprintf(page, "%d\n", var);
2248}
2249
2250static ssize_t
2251cfq_var_store(unsigned int *var, const char *page, size_t count)
2252{
2253        char *p = (char *) page;
2254
2255        *var = simple_strtoul(p, &p, 10);
2256        return count;
2257}
2258
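    /*
     * Generate the sysfs show/store handlers for each tunable. __CONV
     * selects jiffies<->milliseconds conversion, so time based tunables
     * are exposed in msecs while plain counts are shown as-is.
     */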
2259#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2260static ssize_t __FUNC(elevator_t *e, char *page)                        \
2261{                                                                       \
2262        struct cfq_data *cfqd = e->elevator_data;                       \
2263        unsigned int __data = __VAR;                                    \
2264        if (__CONV)                                                     \
2265                __data = jiffies_to_msecs(__data);                      \
2266        return cfq_var_show(__data, (page));                            \
2267}
2268SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2269SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2270SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2271SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2272SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2273SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2274SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2275SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2276SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2277#undef SHOW_FUNCTION
2278
2279#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2280static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2281{                                                                       \
2282        struct cfq_data *cfqd = e->elevator_data;                       \
2283        unsigned int __data;                                            \
2284        int ret = cfq_var_store(&__data, (page), count);                \
2285        if (__data < (MIN))                                             \
2286                __data = (MIN);                                         \
2287        else if (__data > (MAX))                                        \
2288                __data = (MAX);                                         \
2289        if (__CONV)                                                     \
2290                *(__PTR) = msecs_to_jiffies(__data);                    \
2291        else                                                            \
2292                *(__PTR) = __data;                                      \
2293        return ret;                                                     \
2294}
2295STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2296STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2297                UINT_MAX, 1);
2298STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2299                UINT_MAX, 1);
2300STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2301STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2302                UINT_MAX, 0);
2303STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2304STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2305STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2306STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2307                UINT_MAX, 0);
2308#undef STORE_FUNCTION
2309
2310#define CFQ_ATTR(name) \
2311        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2312
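    /*
     * Attribute table handed to the elevator core; each entry typically
     * appears as /sys/block/<dev>/queue/iosched/<name>. For example,
     *
     *   echo 8 > /sys/block/sda/queue/iosched/slice_idle
     *
     * would set slice_idle to 8 msecs (sda being just an example device).
     */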
2313static struct elv_fs_entry cfq_attrs[] = {
2314        CFQ_ATTR(quantum),
2315        CFQ_ATTR(fifo_expire_sync),
2316        CFQ_ATTR(fifo_expire_async),
2317        CFQ_ATTR(back_seek_max),
2318        CFQ_ATTR(back_seek_penalty),
2319        CFQ_ATTR(slice_sync),
2320        CFQ_ATTR(slice_async),
2321        CFQ_ATTR(slice_async_rq),
2322        CFQ_ATTR(slice_idle),
2323        __ATTR_NULL
2324};
2325
2326static struct elevator_type iosched_cfq = {
2327        .ops = {
2328                .elevator_merge_fn =            cfq_merge,
2329                .elevator_merged_fn =           cfq_merged_request,
2330                .elevator_merge_req_fn =        cfq_merged_requests,
2331                .elevator_allow_merge_fn =      cfq_allow_merge,
2332                .elevator_dispatch_fn =         cfq_dispatch_requests,
2333                .elevator_add_req_fn =          cfq_insert_request,
2334                .elevator_activate_req_fn =     cfq_activate_request,
2335                .elevator_deactivate_req_fn =   cfq_deactivate_request,
2336                .elevator_queue_empty_fn =      cfq_queue_empty,
2337                .elevator_completed_req_fn =    cfq_completed_request,
2338                .elevator_former_req_fn =       elv_rb_former_request,
2339                .elevator_latter_req_fn =       elv_rb_latter_request,
2340                .elevator_set_req_fn =          cfq_set_request,
2341                .elevator_put_req_fn =          cfq_put_request,
2342                .elevator_may_queue_fn =        cfq_may_queue,
2343                .elevator_init_fn =             cfq_init_queue,
2344                .elevator_exit_fn =             cfq_exit_queue,
2345                .trim =                         cfq_free_io_context,
2346        },
2347        .elevator_attrs =       cfq_attrs,
2348        .elevator_name =        "cfq",
2349        .elevator_owner =       THIS_MODULE,
2350};
2351
2352static int __init cfq_init(void)
2353{
2354        /*
2355         * could be 0 on HZ < 1000 setups
2356         */
2357        if (!cfq_slice_async)
2358                cfq_slice_async = 1;
2359        if (!cfq_slice_idle)
2360                cfq_slice_idle = 1;
2361
2362        if (cfq_slab_setup())
2363                return -ENOMEM;
2364
2365        elv_register(&iosched_cfq);
2366
2367        return 0;
2368}
2369
2370static void __exit cfq_exit(void)
2371{
2372        DECLARE_COMPLETION_ONSTACK(all_gone);
2373        elv_unregister(&iosched_cfq);
2374        ioc_gone = &all_gone;
2375        /* ioc_gone's update must be visible before reading ioc_count */
2376        smp_wmb();
2377
2378        /*
2379         * this also protects us from entering cfq_slab_kill() with
2380         * pending RCU callbacks
2381         */
2382        if (elv_ioc_count_read(ioc_count))
2383                wait_for_completion(&all_gone);
2384        cfq_slab_kill();
2385}
2386
2387module_init(cfq_init);
2388module_exit(cfq_exit);
2389
2390MODULE_AUTHOR("Jens Axboe");
2391MODULE_LICENSE("GPL");
2392MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
2393