linux/block/cfq-iosched.c
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "cfq.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY          (HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT              (2)

#define CFQ_SLICE_SCALE         (5)
#define CFQ_HW_QUEUE_MIN        (5)
#define CFQ_SERVICE_SHIFT       12

#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)              \
        ((struct cfq_io_context *) (rq)->elevator_private[0])
#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private[1])
#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private[2])

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

static DEFINE_SPINLOCK(cic_index_lock);
static DEFINE_IDA(cic_index_ida);

#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)   ((samples) > 80)
#define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
        unsigned total_weight;
        u64 min_vdisktime;
};
#define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
                        .count = 0, .min_vdisktime = 0, }
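/*
 * Illustrative note: the cached ->left pointer always tracks the node
 * with the smallest key in ->rb, so min extraction via cfq_rb_first()
 * below is O(1) rather than a walk down the tree. The cache is cleared
 * when the leftmost node is erased and lazily rebuilt with rb_first().
 */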

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        int ref;
        /* various state flags, see below */
        unsigned int flags;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* prio tree member */
        struct rb_node p_node;
        /* prio tree root we belong to, if any */
        struct rb_root *p_root;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
        unsigned int allocated_slice;
        unsigned int slice_dispatch;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
        long slice_resid;

        /* pending metadata requests */
        int meta_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;

        pid_t pid;

        u32 seek_history;
        sector_t last_request_pos;

        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        /* Number of sectors dispatched from queue in single dispatch round */
        unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, it does not get its own per-type
 * service trees and uses the single service_tree_idle instead.
 */
enum wl_prio_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
        CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
        ASYNC_WORKLOAD = 0,
        SYNC_NOIDLE_WORKLOAD = 1,
        SYNC_WORKLOAD = 2
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
        /* group service_tree member */
        struct rb_node rb_node;

        /* group service_tree key */
        u64 vdisktime;
        unsigned int weight;
        unsigned int new_weight;
        bool needs_update;

        /* number of cfqq currently on this group */
        int nr_cfqq;

        /*
         * Per group busy queues average. Useful for workload slice calc.
         * We create the array for each prio class but at run time it is
         * used only for the RT and BE classes; the slot for the IDLE
         * class remains unused. This is primarily done to avoid
         * confusion and a gcc warning.
         */
        unsigned int busy_queues_avg[CFQ_PRIO_NR];
        /*
         * rr lists of queues with requests. We maintain service trees for
         * RT and BE classes. These trees are subdivided into subclasses
         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For the
         * IDLE class there is no subclassification and all the cfq
         * queues go on a single tree, service_tree_idle.
         * Counts are embedded in the cfq_rb_root.
         */
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;

        unsigned long saved_workload_slice;
        enum wl_type_t saved_workload;
        enum wl_prio_t saved_serving_prio;
        struct blkio_group blkg;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        struct hlist_node cfqd_node;
        int ref;
#endif
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
};
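/*
 * Illustrative note: saved_workload_slice, saved_workload and
 * saved_serving_prio preserve a group's position within its current
 * workload when the group is taken off the service tree and later
 * re-added; see cfq_group_served(), which records them while the
 * workload timer has not yet expired.
 */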

/*
 * Per block device queue structure
 */
struct cfq_data {
        struct request_queue *queue;
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group root_group;

        /*
         * The priority currently being served
         */
        enum wl_prio_t serving_prio;
        enum wl_type_t serving_type;
        unsigned long workload_expires;
        struct cfq_group *serving_group;

        /*
         * Each priority tree is sorted by next_request position.  These
         * trees are used when determining if two or more queues are
         * interleaving requests (see cfq_close_cooperator).
         */
        struct rb_root prio_trees[CFQ_PRIO_LISTS];

        unsigned int busy_queues;
        unsigned int busy_sync_queues;

        int rq_in_driver;
        int rq_in_flight[2];

        /*
         * queue-depth detection
         */
        int rq_queued;
        int hw_tag;
        /*
         * hw_tag can be
         * -1 => indeterminate (cfq will behave as if NCQ is present, to
         *       allow better detection)
         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
         *  0 => no NCQ
         */
        int hw_tag_est_depth;
        unsigned int hw_tag_samples;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;

        /*
         * async queue for each priority case
         */
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;

        sector_t last_position;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;

        unsigned int cic_index;
        struct list_head cic_list;

        /*
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;

        unsigned long last_delayed_sync;

        /* List of cfq groups being managed on this device */
        struct hlist_head cfqg_list;
        struct rcu_head rcu;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
                                            enum wl_prio_t prio,
                                            enum wl_type_t type)
{
        if (!cfqg)
                return NULL;

        if (prio == IDLE_WORKLOAD)
                return &cfqg->service_tree_idle;

        return &cfqg->service_trees[prio][type];
}
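/*
 * Example (illustrative): a sync, idling best-effort queue maps to
 * &cfqg->service_trees[BE_WORKLOAD][SYNC_WORKLOAD], while an IDLE-class
 * queue maps to &cfqg->service_tree_idle regardless of workload type.
 */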

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)                                              \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
{                                                                       \
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
}                                                                       \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
{                                                                       \
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
}                                                                       \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
{                                                                       \
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
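/*
 * For example, CFQ_CFQQ_FNS(on_rr) above expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags; the same triple exists for
 * every flag listed in enum cfqq_state_flags.
 */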

#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
                        blkg_path(&(cfqq)->cfqg->blkg), ##args);

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
        blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
                                blkg_path(&(cfqg)->blkg), ##args);      \

#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
#endif
#define cfq_log(cfqd, fmt, args...)     \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
        for (i = 0; i <= IDLE_WORKLOAD; i++) \
                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
                        : &cfqg->service_tree_idle; \
                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
                        (i == IDLE_WORKLOAD && j == 0); \
                        j++, st = i < IDLE_WORKLOAD ? \
                        &cfqg->service_trees[i][j]: NULL) \

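/*
 * Iteration order note (illustrative): for_each_cfqg_st() visits the
 * six RT/BE trees first, with i selecting the priority class and j the
 * workload type (ASYNC, SYNC_NOIDLE, SYNC), and finally visits
 * service_tree_idle exactly once when i == IDLE_WORKLOAD and j == 0.
 */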
static inline bool iops_mode(struct cfq_data *cfqd)
{
        /*
         * If we are not idling on queues and it is an NCQ drive, requests
         * are executed in parallel and measuring time is not possible in
         * most cases unless we drive shallower queue depths, which then
         * becomes a performance bottleneck. In such cases switch to
         * providing fairness in terms of number of IOs.
         */
        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
                return true;
        else
                return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
        if (cfq_class_idle(cfqq))
                return IDLE_WORKLOAD;
        if (cfq_class_rt(cfqq))
                return RT_WORKLOAD;
        return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
        if (!cfq_cfqq_sync(cfqq))
                return ASYNC_WORKLOAD;
        if (!cfq_cfqq_idle_window(cfqq))
                return SYNC_NOIDLE_WORKLOAD;
        return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
                                        struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        if (wl == IDLE_WORKLOAD)
                return cfqg->service_tree_idle.count;

        return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
                + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
                + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
                + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                                       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                                                struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
                                            bool is_sync)
{
        return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
                                struct cfq_queue *cfqq, bool is_sync)
{
        cic->cfqq[is_sync] = cfqq;
}

#define CIC_DEAD_KEY    1ul
#define CIC_DEAD_INDEX_SHIFT    1

static inline void *cfqd_dead_key(struct cfq_data *cfqd)
{
        return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
{
        struct cfq_data *cfqd = cic->key;

        if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
                return NULL;

        return cfqd;
}
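/*
 * Note on the dead-key encoding above (illustrative): cic->key normally
 * holds a cfq_data pointer, which is at least word-aligned, so its low
 * bit is clear. cfqd_dead_key() instead stores (cic_index << 1) | 1;
 * the set low bit marks a cic whose elevator has exited, and
 * cic_to_cfqd() filters such keys out by returning NULL.
 */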

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
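/*
 * Worked example (assuming HZ = 1000, so cfq_slice_sync is 100ms and
 * CFQ_SLICE_SCALE is 5): each priority step is worth base_slice/5 =
 * 20ms, so an ioprio-0 sync queue gets 100 + 20 * 4 = 180ms, the
 * default ioprio 4 gets exactly 100ms, and ioprio 7 gets
 * 100 - 20 * 3 = 40ms.
 */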

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
        u64 d = delta << CFQ_SERVICE_SHIFT;

        d = d * BLKIO_WEIGHT_DEFAULT;
        do_div(d, cfqg->weight);
        return d;
}
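/*
 * Sketch of the scaling above (assuming the blkio weight range with
 * BLKIO_WEIGHT_DEFAULT = 500): a group at default weight is charged its
 * service time 1:1 (shifted by CFQ_SERVICE_SHIFT), a group at weight
 * 1000 is charged half, and one at weight 250 is charged double,
 * mirroring how CFS scales vruntime by task weight.
 */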

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta > 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta < 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}
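/*
 * Both helpers above compare u64 vdisktimes through a signed delta so
 * the comparison stays correct even if the counters wrap around 2^64:
 * a vdisktime that has just wrapped to a small value still compares as
 * "after" a min_vdisktime sitting near the top of the range, because
 * the subtraction yields a small positive s64.
 */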

static void update_min_vdisktime(struct cfq_rb_root *st)
{
        struct cfq_group *cfqg;

        if (st->left) {
                cfqg = rb_entry_cfqg(st->left);
                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
                                                  cfqg->vdisktime);
        }
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher
 * numbers, to quickly follow sudden increases and decrease slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg, bool rt)
{
        unsigned min_q, max_q;
        unsigned mult  = cfq_hist_divisor - 1;
        unsigned round = cfq_hist_divisor / 2;
        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

        min_q = min(cfqg->busy_queues_avg[rt], busy);
        max_q = max(cfqg->busy_queues_avg[rt], busy);
        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
                cfq_hist_divisor;
        return cfqg->busy_queues_avg[rt];
}
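/*
 * With cfq_hist_divisor = 4 the update above works out to
 *   avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4
 * e.g. avg = 1 with busy jumping to 5 gives (15 + 1 + 2) / 4 = 4 in a
 * single step, while a drop from avg = 5 to busy = 1 decays one step
 * at a time, since a rising count feeds in through max() and a falling
 * one only through min().
 */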

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        return cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
                /*
                 * interested queues (we consider only the ones with the same
                 * priority class in the cfq group)
                 */
                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
                                                cfq_class_rt(cfqq));
                unsigned sync_slice = cfqd->cfq_slice[1];
                unsigned expect_latency = sync_slice * iq;
                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

                if (expect_latency > group_slice) {
                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
                        /* scale low_slice according to IO priority
                         * and sync vs async */
                        unsigned low_slice =
                                min(slice, base_low_slice * slice / sync_slice);
                        /* the adapted slice value is scaled to fit all iqs
                         * into the target latency */
                        slice = max(slice * group_slice / expect_latency,
                                    low_slice);
                }
        }
        return slice;
}
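/*
 * Summary of the scaling above (illustrative): with cfq_latency set,
 * if the average number of busy queues in this priority class could
 * not each receive a full sync slice within the group's share of the
 * target latency (expect_latency > group_slice), every queue's slice
 * is shrunk proportionally, but never below low_slice.
 */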

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
        cfqq->allocated_slice = slice;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return false;
        if (time_before(jiffies, cfqq->slice_end))
                return false;

        return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
        sector_t s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
        unsigned wrap = 0; /* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
        if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
                return rq1;
        else if ((rq2->cmd_flags & REQ_META) &&
                 !(rq1->cmd_flags & REQ_META))
                return rq2;

        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}
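/*
 * The switch above enumerates the four wrap combinations: neither
 * request behind the head (pick the shorter weighted distance, ties
 * going to the higher sector), exactly one wrapped (pick the other
 * one), or both wrapped (pick the lower sector, i.e. the one further
 * behind the head, so only one long backward seek is needed).
 */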

/*
 * Below is the leftmost-cache rbtree addon.
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
        /* Service tree is empty */
        if (!root->count)
                return NULL;

        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry(root->left, struct cfq_queue, rb_node);

        return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry_cfqg(root->left);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;
        rb_erase_init(n, &root->rb);
        --root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct rb_node **node = &st->rb.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_group *__cfqg;
        s64 key = cfqg_key(st, cfqg);
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __cfqg = rb_entry_cfqg(parent);

                if (key < cfqg_key(st, __cfqg))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                st->left = &cfqg->rb_node;

        rb_link_node(&cfqg->rb_node, parent, node);
        rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
        if (cfqg->needs_update) {
                cfqg->weight = cfqg->new_weight;
                cfqg->needs_update = false;
        }
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        cfq_update_group_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
        st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        struct cfq_group *__cfqg;
        struct rb_node *n;

        cfqg->nr_cfqq++;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                return;

        /*
         * Currently put the group at the end. Later implement something
         * so that groups get less vtime based on their weights, so that
         * a group does not lose everything if it was not continuously
         * backlogged.
         */
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        st->total_weight -= cfqg->weight;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        BUG_ON(cfqg->nr_cfqq < 1);
        cfqg->nr_cfqq--;

        /* If there are other cfq queues under this group, don't delete it */
        if (cfqg->nr_cfqq)
                return;

        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
        cfqg->saved_workload_slice = 0;
        cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
                                                unsigned int *unaccounted_time)
{
        unsigned int slice_used;

        /*
         * Queue got expired before even a single request completed or
         * got expired immediately after first request completion.
         */
        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
                /*
                 * Also charge the seek time incurred to the group, otherwise
                 * if there are multiple queues in the group, each can dispatch
                 * a single request on seeky media and cause lots of seek time
                 * and group will never know it.
                 */
                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
                                        1);
        } else {
                slice_used = jiffies - cfqq->slice_start;
                if (slice_used > cfqq->allocated_slice) {
                        *unaccounted_time = slice_used - cfqq->allocated_slice;
                        slice_used = cfqq->allocated_slice;
                }
                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
                        *unaccounted_time += cfqq->slice_start -
                                        cfqq->dispatch_start;
        }

        return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        unsigned int used_sl, charge, unaccounted_sl = 0;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;

        BUG_ON(nr_sync < 0);
        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

        if (iops_mode(cfqd))
                charge = cfqq->slice_dispatch;
        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
                charge = cfqq->allocated_slice;

        /* Can't update vdisktime while group is on service tree */
        cfq_group_service_tree_del(st, cfqg);
        cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        /* If a new weight was requested, update now, off tree */
        cfq_group_service_tree_add(st, cfqg);

        /* This group is being expired. Save the context */
        if (time_after(cfqd->workload_expires, jiffies)) {
                cfqg->saved_workload_slice = cfqd->workload_expires
                                                - jiffies;
                cfqg->saved_workload = cfqd->serving_type;
                cfqg->saved_serving_prio = cfqd->serving_prio;
        } else
                cfqg->saved_workload_slice = 0;

        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
        cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
                        " sect=%u", used_sl, cfqq->slice_dispatch, charge,
                        iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
                                          unaccounted_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
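/*
 * Summary of the charging policy above: groups are charged wall-clock
 * service time by default, the number of dispatched requests in
 * iops_mode() (NCQ drive, no idling), and the whole allocated slice
 * for an async queue running with no sync competition, so a group's
 * vdisktime always advances in proportion to the service it consumed.
 */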

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
        if (blkg)
                return container_of(blkg, struct cfq_group, blkg);
        return NULL;
}

void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
                                        unsigned int weight)
{
        struct cfq_group *cfqg = cfqg_of_blkg(blkg);
        cfqg->new_weight = weight;
        cfqg->needs_update = true;
}

static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
                struct blkio_cgroup *blkcg, int create)
{
        struct cfq_group *cfqg = NULL;
        void *key = cfqd;
        int i, j;
        struct cfq_rb_root *st;
        struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
        unsigned int major, minor;

        cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
        if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
                sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
                cfqg->blkg.dev = MKDEV(major, minor);
                goto done;
        }
        if (cfqg || !create)
                goto done;

        cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
        if (!cfqg)
                goto done;

        for_each_cfqg_st(cfqg, i, j, st)
                *st = CFQ_RB_ROOT;
        RB_CLEAR_NODE(&cfqg->rb_node);

        /*
         * Take the initial reference that will be released on destroy.
         * This can be thought of as a joint reference by cgroup and
         * elevator which will be dropped by either elevator exit
         * or cgroup deletion path depending on who is exiting first.
         */
        cfqg->ref = 1;

        /*
         * Add group onto cgroup list. It might happen that bdi->dev is
         * not initialized yet. Initialize this new group without major
         * and minor info and this info will be filled in once a new thread
         * comes for IO. See code above.
         */
        if (bdi->dev) {
                sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
                cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                        MKDEV(major, minor));
        } else
                cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                        0);

        cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);

        /* Add group on cfqd list */
        hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);

done:
        return cfqg;
}

/*
 * Search for the cfq group current task belongs to. If create = 1, then also
 * create the cfq group if it does not exist. request_queue lock must be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
        struct blkio_cgroup *blkcg;
        struct cfq_group *cfqg = NULL;

        rcu_read_lock();
        blkcg = task_blkio_cgroup(current);
        cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
        if (!cfqg && create)
                cfqg = &cfqd->root_group;
        rcu_read_unlock();
        return cfqg;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
        cfqg->ref++;
        return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
        /* Currently, all async queues are mapped to root group */
        if (!cfq_cfqq_sync(cfqq))
                cfqg = &cfqq->cfqd->root_group;

        cfqq->cfqg = cfqg;
        /* cfqq reference on cfqg */
        cfqq->cfqg->ref++;
}

static void cfq_put_cfqg(struct cfq_group *cfqg)
{
        struct cfq_rb_root *st;
        int i, j;

        BUG_ON(cfqg->ref <= 0);
        cfqg->ref--;
        if (cfqg->ref)
                return;
        for_each_cfqg_st(cfqg, i, j, st)
                BUG_ON(!RB_EMPTY_ROOT(&st->rb));
        kfree(cfqg);
}

static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        /* Something is wrong if we are trying to remove the same group twice */
        BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

        hlist_del_init(&cfqg->cfqd_node);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, the group can be destroyed.
         */
        cfq_put_cfqg(cfqg);
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd)
{
        struct hlist_node *pos, *n;
        struct cfq_group *cfqg;

        hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
                /*
                 * If the cgroup removal path got to the blk_group first
                 * and removed it from the cgroup list, then it will take
                 * care of destroying the cfqg also.
                 */
                if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
                        cfq_destroy_cfqg(cfqd, cfqg);
        }
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as the associated cgroup object is going away. That also means
 * that no new IO will come in this group. So get rid of this group as soon
 * as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we are
 * holding the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the elevator was exiting, the cgroup
 * deletion path got to it first.
 */
void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
        unsigned long flags;
        struct cfq_data *cfqd = key;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
        cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
        return &cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
        return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
        cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees hold all pending cfq_queues that have
 * requests waiting to be processed, sorted in the order in which
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                 bool add_front)
{
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
        unsigned long rb_key;
        struct cfq_rb_root *service_tree;
        int left;
        int new_cfqq = 1;
        int group_changed = 0;

        service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
                                                cfqq_type(cfqq));
        if (cfq_class_idle(cfqq)) {
                rb_key = CFQ_IDLE_DELAY;
                parent = rb_last(&service_tree->rb);
                if (parent && parent != &cfqq->rb_node) {
                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
                        rb_key += __cfqq->rb_key;
                } else
                        rb_key += jiffies;
        } else if (!add_front) {
                /*
                 * Get our rb key offset. Subtract any residual slice
                 * value carried from last service. A negative resid
                 * count indicates slice overrun, and this should position
                 * the next service time further away in the tree.
                 */
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
                rb_key -= cfqq->slice_resid;
                cfqq->slice_resid = 0;
        } else {
                rb_key = -HZ;
                __cfqq = cfq_rb_first(service_tree);
                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
        }

        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                new_cfqq = 0;
                /*
                 * same position, nothing more to do
                 */
                if (rb_key == cfqq->rb_key &&
                    cfqq->service_tree == service_tree)
                        return;

                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
                cfqq->service_tree = NULL;
        }

        left = 1;
        parent = NULL;
        cfqq->service_tree = service_tree;
        p = &service_tree->rb.rb_node;
        while (*p) {
                struct rb_node **n;

                parent = *p;
                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);

                /*
                 * sort by key, that represents service time.
                 */
                if (time_before(rb_key, __cfqq->rb_key))
                        n = &(*p)->rb_left;
                else {
                        n = &(*p)->rb_right;
                        left = 0;
                }

                p = n;
        }

        if (left)
                service_tree->left = &cfqq->rb_node;

        cfqq->rb_key = rb_key;
        rb_link_node(&cfqq->rb_node, parent, p);
        rb_insert_color(&cfqq->rb_node, &service_tree->rb);
        service_tree->count++;
        if ((add_front || !new_cfqq) && !group_changed)
                return;
        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
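/*
 * Note on the rb_key computed above (illustrative summary): it is an
 * absolute, jiffies-based service deadline. Idle-class queues are keyed
 * CFQ_IDLE_DELAY past the last entry, add_front queues just before the
 * current leftmost entry, and ordinary queues at jiffies plus a slice
 * offset minus any residual slice, so a slice overrun (negative resid)
 * pushes the next service further out.
 */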

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
                     sector_t sector, struct rb_node **ret_parent,
                     struct rb_node ***rb_link)
{
        struct rb_node **p, *parent;
        struct cfq_queue *cfqq = NULL;

        parent = NULL;
        p = &root->rb_node;
        while (*p) {
                struct rb_node **n;

                parent = *p;
                cfqq = rb_entry(parent, struct cfq_queue, p_node);

                /*
                 * Sort strictly based on sector.  Smallest to the left,
                 * largest to the right.
                 */
                if (sector > blk_rq_pos(cfqq->next_rq))
                        n = &(*p)->rb_right;
                else if (sector < blk_rq_pos(cfqq->next_rq))
                        n = &(*p)->rb_left;
                else
                        break;
                p = n;
                cfqq = NULL;
        }

        *ret_parent = parent;
        if (rb_link)
                *rb_link = p;
        return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;

        if (cfqq->p_root) {
                rb_erase(&cfqq->p_node, cfqq->p_root);
                cfqq->p_root = NULL;
        }

        if (cfq_class_idle(cfqq))
                return;
        if (!cfqq->next_rq)
                return;

        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
        if (!__cfqq) {
                rb_link_node(&cfqq->p_node, parent, p);
                rb_insert_color(&cfqq->p_node, cfqq->p_root);
        } else
                cfqq->p_root = NULL;
}
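/*
 * The prio trees maintained here sort the queues of one ioprio by the
 * sector of their next pending request; cfq_close_cooperator() later
 * searches them for a queue issuing requests near the current head
 * position, which is how cooperating (interleaving) processes are
 * detected. A queue whose next_rq sector collides with an existing
 * entry is simply left out of the tree (p_root reset to NULL).
 */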

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        /*
         * Resorting requires the cfqq to be on the RR list already.
         */
        if (cfq_cfqq_on_rr(cfqq)) {
                cfq_service_tree_add(cfqd, cfqq, 0);
                cfq_prio_tree_add(cfqd, cfqq);
        }
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
        if (cfq_cfqq_sync(cfqq))
                cfqd->busy_sync_queues++;

        cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
        BUG_ON(!cfq_cfqq_on_rr(cfqq));
        cfq_clear_cfqq_on_rr(cfqq);

        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
                cfqq->service_tree = NULL;
        }
        if (cfqq->p_root) {
                rb_erase(&cfqq->p_node, cfqq->p_root);
                cfqq->p_root = NULL;
        }

        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
        if (cfq_cfqq_sync(cfqq))
                cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        const int sync = rq_is_sync(rq);

        BUG_ON(!cfqq->queued[sync]);
        cfqq->queued[sync]--;

        elv_rb_del(&cfqq->sort_list, rq);

        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
                /*
                 * Queue will be deleted from service tree when we actually
                 * expire it later. Right now just remove it from prio tree
                 * as it is empty.
                 */
                if (cfqq->p_root) {
                        rb_erase(&cfqq->p_node, cfqq->p_root);
                        cfqq->p_root = NULL;
                }
        }
}

static void cfq_add_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *__alias, *prev;

        cfqq->queued[rq_is_sync(rq)]++;

        /*
         * looks a little odd, but the first insert might return an alias.
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
                cfq_dispatch_insert(cfqd->queue, __alias);

        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq);

        /*
         * check if this request is a better next-serve candidate
         */
        prev = cfqq->next_rq;
        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

        /*
         * adjust priority tree position, if ->next_rq changes
         */
        if (prev != cfqq->next_rq)
                cfq_prio_tree_add(cfqd, cfqq);

        BUG_ON(!cfqq->next_rq);
}
1464
1465static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1466{
1467        elv_rb_del(&cfqq->sort_list, rq);
1468        cfqq->queued[rq_is_sync(rq)]--;
1469        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1470                                        rq_data_dir(rq), rq_is_sync(rq));
1471        cfq_add_rq_rb(rq);
1472        cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1473                        &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1474                        rq_is_sync(rq));
1475}
1476
1477static struct request *
1478cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1479{
1480        struct task_struct *tsk = current;
1481        struct cfq_io_context *cic;
1482        struct cfq_queue *cfqq;
1483
1484        cic = cfq_cic_lookup(cfqd, tsk->io_context);
1485        if (!cic)
1486                return NULL;
1487
1488        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1489        if (cfqq) {
1490                sector_t sector = bio->bi_sector + bio_sectors(bio);
1491
1492                return elv_rb_find(&cfqq->sort_list, sector);
1493        }
1494
1495        return NULL;
1496}
1497
1498static void cfq_activate_request(struct request_queue *q, struct request *rq)
1499{
1500        struct cfq_data *cfqd = q->elevator->elevator_data;
1501
1502        cfqd->rq_in_driver++;
1503        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1504                                                cfqd->rq_in_driver);
1505
1506        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1507}
1508
1509static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1510{
1511        struct cfq_data *cfqd = q->elevator->elevator_data;
1512
1513        WARN_ON(!cfqd->rq_in_driver);
1514        cfqd->rq_in_driver--;
1515        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1516                                                cfqd->rq_in_driver);
1517}
1518
1519static void cfq_remove_request(struct request *rq)
1520{
1521        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1522
1523        if (cfqq->next_rq == rq)
1524                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1525
1526        list_del_init(&rq->queuelist);
1527        cfq_del_rq_rb(rq);
1528
1529        cfqq->cfqd->rq_queued--;
1530        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1531                                        rq_data_dir(rq), rq_is_sync(rq));
1532        if (rq->cmd_flags & REQ_META) {
1533                WARN_ON(!cfqq->meta_pending);
1534                cfqq->meta_pending--;
1535        }
1536}
1537
1538static int cfq_merge(struct request_queue *q, struct request **req,
1539                     struct bio *bio)
1540{
1541        struct cfq_data *cfqd = q->elevator->elevator_data;
1542        struct request *__rq;
1543
1544        __rq = cfq_find_rq_fmerge(cfqd, bio);
1545        if (__rq && elv_rq_merge_ok(__rq, bio)) {
1546                *req = __rq;
1547                return ELEVATOR_FRONT_MERGE;
1548        }
1549
1550        return ELEVATOR_NO_MERGE;
1551}
1552
1553static void cfq_merged_request(struct request_queue *q, struct request *req,
1554                               int type)
1555{
1556        if (type == ELEVATOR_FRONT_MERGE) {
1557                struct cfq_queue *cfqq = RQ_CFQQ(req);
1558
1559                cfq_reposition_rq_rb(cfqq, req);
1560        }
1561}
1562
1563static void cfq_bio_merged(struct request_queue *q, struct request *req,
1564                                struct bio *bio)
1565{
1566        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1567                                        bio_data_dir(bio), cfq_bio_sync(bio));
1568}
1569
1570static void
1571cfq_merged_requests(struct request_queue *q, struct request *rq,
1572                    struct request *next)
1573{
1574        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1575        /*
1576         * reposition in fifo if next is older than rq
1577         */
1578        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1579            time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1580                list_move(&rq->queuelist, &next->queuelist);
1581                rq_set_fifo_time(rq, rq_fifo_time(next));
1582        }
1583
1584        if (cfqq->next_rq == next)
1585                cfqq->next_rq = rq;
1586        cfq_remove_request(next);
1587        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1588                                        rq_data_dir(next), rq_is_sync(next));
1589}
1590
1591static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1592                           struct bio *bio)
1593{
1594        struct cfq_data *cfqd = q->elevator->elevator_data;
1595        struct cfq_io_context *cic;
1596        struct cfq_queue *cfqq;
1597
1598        /*
1599         * Disallow merge of a sync bio into an async request.
1600         */
1601        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1602                return false;
1603
1604        /*
1605         * Look up the cfqq that this bio will be queued with. Allow
1606         * merge only if rq is queued there.
1607         */
1608        cic = cfq_cic_lookup(cfqd, current->io_context);
1609        if (!cic)
1610                return false;
1611
1612        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1613        return cfqq == RQ_CFQQ(rq);
1614}
1615
1616static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1617{
1618        del_timer(&cfqd->idle_slice_timer);
1619        cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1620}
1621
1622static void __cfq_set_active_queue(struct cfq_data *cfqd,
1623                                   struct cfq_queue *cfqq)
1624{
1625        if (cfqq) {
1626                cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1627                                cfqd->serving_prio, cfqd->serving_type);
1628                cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1629                cfqq->slice_start = 0;
1630                cfqq->dispatch_start = jiffies;
1631                cfqq->allocated_slice = 0;
1632                cfqq->slice_end = 0;
1633                cfqq->slice_dispatch = 0;
1634                cfqq->nr_sectors = 0;
1635
1636                cfq_clear_cfqq_wait_request(cfqq);
1637                cfq_clear_cfqq_must_dispatch(cfqq);
1638                cfq_clear_cfqq_must_alloc_slice(cfqq);
1639                cfq_clear_cfqq_fifo_expire(cfqq);
1640                cfq_mark_cfqq_slice_new(cfqq);
1641
1642                cfq_del_timer(cfqd, cfqq);
1643        }
1644
1645        cfqd->active_queue = cfqq;
1646}
1647
1648/*
1649 * current cfqq expired its slice (or was too idle), select new one
1650 */
1651static void
1652__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1653                    bool timed_out)
1654{
1655        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1656
1657        if (cfq_cfqq_wait_request(cfqq))
1658                cfq_del_timer(cfqd, cfqq);
1659
1660        cfq_clear_cfqq_wait_request(cfqq);
1661        cfq_clear_cfqq_wait_busy(cfqq);
1662
1663        /*
1664         * If this cfqq is shared between multiple processes, check to
1665         * make sure that those processes are still issuing I/Os within
1666         * the mean seek distance.  If not, it may be time to break the
1667         * queues apart again.
1668         */
1669        if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1670                cfq_mark_cfqq_split_coop(cfqq);
1671
1672        /*
1673         * store what was left of this slice, if the queue idled/timed out
1674         */
1675        if (timed_out) {
1676                if (cfq_cfqq_slice_new(cfqq))
1677                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1678                else
1679                        cfqq->slice_resid = cfqq->slice_end - jiffies;
1680                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1681        }
1682
1683        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1684
1685        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1686                cfq_del_cfqq_rr(cfqd, cfqq);
1687
1688        cfq_resort_rr_list(cfqd, cfqq);
1689
1690        if (cfqq == cfqd->active_queue)
1691                cfqd->active_queue = NULL;
1692
1693        if (cfqd->active_cic) {
1694                put_io_context(cfqd->active_cic->ioc);
1695                cfqd->active_cic = NULL;
1696        }
1697}
1698
1699static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1700{
1701        struct cfq_queue *cfqq = cfqd->active_queue;
1702
1703        if (cfqq)
1704                __cfq_slice_expired(cfqd, cfqq, timed_out);
1705}
1706
1707/*
1708 * Get next queue for service. Unless we have a queue preemption,
1709 * we'll simply select the first cfqq in the service tree.
1710 */
1711static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1712{
1713        struct cfq_rb_root *service_tree =
1714                service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1715                                        cfqd->serving_type);
1716
1717        if (!cfqd->rq_queued)
1718                return NULL;
1719
1720        /* There is nothing to dispatch */
1721        if (!service_tree)
1722                return NULL;
1723        if (RB_EMPTY_ROOT(&service_tree->rb))
1724                return NULL;
1725        return cfq_rb_first(service_tree);
1726}
1727
1728static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1729{
1730        struct cfq_group *cfqg;
1731        struct cfq_queue *cfqq;
1732        int i, j;
1733        struct cfq_rb_root *st;
1734
1735        if (!cfqd->rq_queued)
1736                return NULL;
1737
1738        cfqg = cfq_get_next_cfqg(cfqd);
1739        if (!cfqg)
1740                return NULL;
1741
1742        for_each_cfqg_st(cfqg, i, j, st)
1743                if ((cfqq = cfq_rb_first(st)) != NULL)
1744                        return cfqq;
1745        return NULL;
1746}
1747
1748/*
1749 * Get and set a new active queue for service.
1750 */
1751static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1752                                              struct cfq_queue *cfqq)
1753{
1754        if (!cfqq)
1755                cfqq = cfq_get_next_queue(cfqd);
1756
1757        __cfq_set_active_queue(cfqd, cfqq);
1758        return cfqq;
1759}
1760
1761static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1762                                          struct request *rq)
1763{
1764        if (blk_rq_pos(rq) >= cfqd->last_position)
1765                return blk_rq_pos(rq) - cfqd->last_position;
1766        else
1767                return cfqd->last_position - blk_rq_pos(rq);
1768}
1769
1770static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1771                               struct request *rq)
1772{
1773        return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1774}
1775
1776static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1777                                    struct cfq_queue *cur_cfqq)
1778{
1779        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1780        struct rb_node *parent, *node;
1781        struct cfq_queue *__cfqq;
1782        sector_t sector = cfqd->last_position;
1783
1784        if (RB_EMPTY_ROOT(root))
1785                return NULL;
1786
1787        /*
1788         * First, if we find a request starting at the end of the last
1789         * request, choose it.
1790         */
1791        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1792        if (__cfqq)
1793                return __cfqq;
1794
1795        /*
1796         * If the exact sector wasn't found, the parent of the NULL leaf
1797         * will contain the closest sector.
1798         */
1799        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1800        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1801                return __cfqq;
1802
1803        if (blk_rq_pos(__cfqq->next_rq) < sector)
1804                node = rb_next(&__cfqq->p_node);
1805        else
1806                node = rb_prev(&__cfqq->p_node);
1807        if (!node)
1808                return NULL;
1809
1810        __cfqq = rb_entry(node, struct cfq_queue, p_node);
1811        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1812                return __cfqq;
1813
1814        return NULL;
1815}
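
    /*
     * A note on the search above: the priority tree orders cfqqs by the
     * sector of their next request. If no queue starts exactly at
     * last_position, the parent of the NULL leaf where the lookup ended
     * is the nearest queue on one side of that sector, and one
     * rb_next()/rb_prev() step yields the nearest on the other side, so
     * at most two candidates ever need the cfq_rq_close() check.
     */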
1816
1817/*
1818 * cfqd - obvious
1819 * cur_cfqq - passed in so that we don't decide that the current queue is
1820 *            closely cooperating with itself.
1821 *
1822 * So, basically we're assuming that cur_cfqq has dispatched at least
1823 * one request, and that cfqd->last_position reflects a position on the disk
1824 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1825 * assumption.
1826 */
1827static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1828                                              struct cfq_queue *cur_cfqq)
1829{
1830        struct cfq_queue *cfqq;
1831
1832        if (cfq_class_idle(cur_cfqq))
1833                return NULL;
1834        if (!cfq_cfqq_sync(cur_cfqq))
1835                return NULL;
1836        if (CFQQ_SEEKY(cur_cfqq))
1837                return NULL;
1838
1839        /*
1840         * Don't search priority tree if it's the only queue in the group.
1841         */
1842        if (cur_cfqq->cfqg->nr_cfqq == 1)
1843                return NULL;
1844
1845        /*
1846         * We should notice if some of the queues are cooperating, e.g.
1847         * working closely on the same area of the disk. In that case,
1848         * we can group them together and avoid wasting time idling.
1849         */
1850        cfqq = cfqq_close(cfqd, cur_cfqq);
1851        if (!cfqq)
1852                return NULL;
1853
1854        /* If new queue belongs to different cfq_group, don't choose it */
1855        if (cur_cfqq->cfqg != cfqq->cfqg)
1856                return NULL;
1857
1858        /*
1859         * It only makes sense to merge sync queues.
1860         */
1861        if (!cfq_cfqq_sync(cfqq))
1862                return NULL;
1863        if (CFQQ_SEEKY(cfqq))
1864                return NULL;
1865
1866        /*
1867         * Do not merge queues of different priority classes
1868         */
1869        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1870                return NULL;
1871
1872        return cfqq;
1873}
1874
1875/*
1876 * Determine whether we should enforce idle window for this queue.
1877 */
1878
1879static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1880{
1881        enum wl_prio_t prio = cfqq_prio(cfqq);
1882        struct cfq_rb_root *service_tree = cfqq->service_tree;
1883
1884        BUG_ON(!service_tree);
1885        BUG_ON(!service_tree->count);
1886
1887        if (!cfqd->cfq_slice_idle)
1888                return false;
1889
1890        /* We never idle for idle class queues. */
1891        if (prio == IDLE_WORKLOAD)
1892                return false;
1893
1894        /* We idle for queues that were marked with the idle window flag. */
1895        if (cfq_cfqq_idle_window(cfqq) &&
1896           !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1897                return true;
1898
1899        /*
1900         * Otherwise, we idle only if the queue is the last one
1901         * in its service tree.
1902         */
1903        if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1904                return true;
1905        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1906                        service_tree->count);
1907        return false;
1908}
1909
1910static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1911{
1912        struct cfq_queue *cfqq = cfqd->active_queue;
1913        struct cfq_io_context *cic;
1914        unsigned long sl, group_idle = 0;
1915
1916        /*
1917         * On an SSD without seek penalty, disable idling. But only do so
1918         * for devices that support queuing; otherwise we still have a problem
1919         * with sync vs async workloads.
1920         */
1921        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1922                return;
1923
1924        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1925        WARN_ON(cfq_cfqq_slice_new(cfqq));
1926
1927        /*
1928         * idle is disabled, either manually or by past process history
1929         */
1930        if (!cfq_should_idle(cfqd, cfqq)) {
1931                /* no queue idling. Check for group idling */
1932                if (cfqd->cfq_group_idle)
1933                        group_idle = cfqd->cfq_group_idle;
1934                else
1935                        return;
1936        }
1937
1938        /*
1939         * still active requests from this queue, don't idle
1940         */
1941        if (cfqq->dispatched)
1942                return;
1943
1944        /*
1945         * task has exited, don't wait
1946         */
1947        cic = cfqd->active_cic;
1948        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1949                return;
1950
1951        /*
1952         * If our average think time is larger than the remaining time
1953         * slice, then don't idle. This avoids overrunning the allotted
1954         * time slice.
1955         */
1956        if (sample_valid(cic->ttime_samples) &&
1957            (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1958                cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
1959                                cic->ttime_mean);
1960                return;
1961        }
1962
1963        /* There are other queues in the group, don't do group idle */
1964        if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1965                return;
1966
1967        cfq_mark_cfqq_wait_request(cfqq);
1968
1969        if (group_idle)
1970                sl = cfqd->cfq_group_idle;
1971        else
1972                sl = cfqd->cfq_slice_idle;
1973
1974        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1975        cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1976        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1977                        group_idle ? 1 : 0);
1978}
1979
1980/*
1981 * Move request from internal lists to the request queue dispatch list.
1982 */
1983static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1984{
1985        struct cfq_data *cfqd = q->elevator->elevator_data;
1986        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1987
1988        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1989
1990        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1991        cfq_remove_request(rq);
1992        cfqq->dispatched++;
1993        (RQ_CFQG(rq))->dispatched++;
1994        elv_dispatch_sort(q, rq);
1995
1996        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1997        cfqq->nr_sectors += blk_rq_sectors(rq);
1998        cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1999                                        rq_data_dir(rq), rq_is_sync(rq));
2000}
2001
2002/*
2003 * return expired entry, or NULL to just start from scratch in rbtree
2004 */
2005static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2006{
2007        struct request *rq = NULL;
2008
2009        if (cfq_cfqq_fifo_expire(cfqq))
2010                return NULL;
2011
2012        cfq_mark_cfqq_fifo_expire(cfqq);
2013
2014        if (list_empty(&cfqq->fifo))
2015                return NULL;
2016
2017        rq = rq_entry_fifo(cfqq->fifo.next);
2018        if (time_before(jiffies, rq_fifo_time(rq)))
2019                rq = NULL;
2020
2021        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2022        return rq;
2023}
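
    /*
     * The fifo_expire flag set above limits this check to once per
     * slice (it is cleared again in __cfq_set_active_queue()); later
     * dispatches within the slice follow sector order only. A request
     * is returned only when jiffies has passed its rq_fifo_time
     * deadline, i.e. it has waited longer than the fifo expiry allows.
     */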
2024
2025static inline int
2026cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2027{
2028        const int base_rq = cfqd->cfq_slice_async_rq;
2029
2030        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2031
2032        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
2033}
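
    /*
     * Worked example for the formula above, assuming the default
     * cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS of 8: an ioprio-4
     * queue may dispatch 2 * (2 + 2 * (8 - 1 - 4)) = 16 requests per
     * slice round, an ioprio-0 queue 32, and an ioprio-7 queue only 4.
     * Higher priority buys a proportionally deeper dispatch quota.
     */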
2034
2035/*
2036 * Must be called with the queue_lock held.
2037 */
2038static int cfqq_process_refs(struct cfq_queue *cfqq)
2039{
2040        int process_refs, io_refs;
2041
2042        io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2043        process_refs = cfqq->ref - io_refs;
2044        BUG_ON(process_refs < 0);
2045        return process_refs;
2046}
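
    /*
     * For example, a cfqq with ->ref == 5 and 3 allocated requests has
     * 2 process references. Only process references matter for the
     * queue merge logic below, since request references come and go
     * with individual I/Os.
     */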
2047
2048static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2049{
2050        int process_refs, new_process_refs;
2051        struct cfq_queue *__cfqq;
2052
2053        /*
2054         * If there are no process references on the new_cfqq, then it is
2055         * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2056         * chain may have dropped their last reference (not just their
2057         * last process reference).
2058         */
2059        if (!cfqq_process_refs(new_cfqq))
2060                return;
2061
2062        /* Avoid a circular list and skip interim queue merges */
2063        while ((__cfqq = new_cfqq->new_cfqq)) {
2064                if (__cfqq == cfqq)
2065                        return;
2066                new_cfqq = __cfqq;
2067        }
2068
2069        process_refs = cfqq_process_refs(cfqq);
2070        new_process_refs = cfqq_process_refs(new_cfqq);
2071        /*
2072         * If the process for the cfqq has gone away, there is no
2073         * sense in merging the queues.
2074         */
2075        if (process_refs == 0 || new_process_refs == 0)
2076                return;
2077
2078        /*
2079         * Merge in the direction of the lesser amount of work.
2080         */
2081        if (new_process_refs >= process_refs) {
2082                cfqq->new_cfqq = new_cfqq;
2083                new_cfqq->ref += process_refs;
2084        } else {
2085                new_cfqq->new_cfqq = cfqq;
2086                cfqq->ref += new_process_refs;
2087        }
2088}
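
    /*
     * Merge direction, by example: if cfqq has 3 process refs and
     * new_cfqq has 1, the else branch above makes new_cfqq follow cfqq
     * (new_cfqq->new_cfqq = cfqq) and cfqq inherits new_cfqq's single
     * process ref. The queue with fewer processes behind it is always
     * the one redirected, so the cheaper side of the merge moves.
     */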
2089
2090static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2091                                struct cfq_group *cfqg, enum wl_prio_t prio)
2092{
2093        struct cfq_queue *queue;
2094        int i;
2095        bool key_valid = false;
2096        unsigned long lowest_key = 0;
2097        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2098
2099        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2100                /* select the one with lowest rb_key */
2101                queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2102                if (queue &&
2103                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2104                        lowest_key = queue->rb_key;
2105                        cur_best = i;
2106                        key_valid = true;
2107                }
2108        }
2109
2110        return cur_best;
2111}
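
    /*
     * The rb_key is a queue's position on its service tree, so the
     * smallest key marks the queue scheduled to run earliest. Choosing
     * the workload type that owns that queue means the type whose head
     * queue has waited longest is served next.
     */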
2112
2113static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2114{
2115        unsigned slice;
2116        unsigned count;
2117        struct cfq_rb_root *st;
2118        unsigned group_slice;
2119        enum wl_prio_t original_prio = cfqd->serving_prio;
2120
2121        /* Choose next priority. RT > BE > IDLE */
2122        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2123                cfqd->serving_prio = RT_WORKLOAD;
2124        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2125                cfqd->serving_prio = BE_WORKLOAD;
2126        else {
2127                cfqd->serving_prio = IDLE_WORKLOAD;
2128                cfqd->workload_expires = jiffies + 1;
2129                return;
2130        }
2131
2132        if (original_prio != cfqd->serving_prio)
2133                goto new_workload;
2134
2135        /*
2136         * For RT and BE, we have to choose also the type
2137         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2138         * expiration time
2139         */
2140        st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2141        count = st->count;
2142
2143        /*
2144         * check workload expiration, and that we still have other queues ready
2145         */
2146        if (count && !time_after(jiffies, cfqd->workload_expires))
2147                return;
2148
2149new_workload:
2150        /* otherwise select new workload type */
2151        cfqd->serving_type =
2152                cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2153        st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2154        count = st->count;
2155
2156        /*
2157         * the workload slice is computed as a fraction of target latency
2158         * proportional to the number of queues in that workload, over
2159         * all the queues in the same priority class
2160         */
2161        group_slice = cfq_group_slice(cfqd, cfqg);
2162
2163        slice = group_slice * count /
2164                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2165                      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
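            /*
             * Worked example: if group_slice is 120ms, the max() above
             * evaluates to 4 busy queues and this workload type holds 2
             * of them (count == 2), the raw workload slice is
             * 120 * 2 / 4 = 60ms, before the async scaling or the sync
             * minimum applied below.
             */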
2166
2167        if (cfqd->serving_type == ASYNC_WORKLOAD) {
2168                unsigned int tmp;
2169
2170                /*
2171                 * Async queues are currently system wide. Just taking the
2172                 * proportion of queues within the same group would lead to a
2173                 * higher async ratio system wide, as the root group generally
2174                 * has a higher weight. A more accurate approach would be to
2175                 * calculate the system wide async/sync ratio.
2176                 */
2177                tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2178                tmp = tmp/cfqd->busy_queues;
2179                slice = min_t(unsigned, slice, tmp);
2180
2181                /* async workload slice is scaled down according to
2182                 * the sync/async slice ratio. */
2183                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2184        } else
2185                /* sync workload slice is at least 2 * cfq_slice_idle */
2186                slice = max(slice, 2 * cfqd->cfq_slice_idle);
2187
2188        slice = max_t(unsigned, slice, CFQ_MIN_TT);
2189        cfq_log(cfqd, "workload slice:%d", slice);
2190        cfqd->workload_expires = jiffies + slice;
2191}
2192
2193static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2194{
2195        struct cfq_rb_root *st = &cfqd->grp_service_tree;
2196        struct cfq_group *cfqg;
2197
2198        if (RB_EMPTY_ROOT(&st->rb))
2199                return NULL;
2200        cfqg = cfq_rb_first_group(st);
2201        update_min_vdisktime(st);
2202        return cfqg;
2203}
2204
2205static void cfq_choose_cfqg(struct cfq_data *cfqd)
2206{
2207        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2208
2209        cfqd->serving_group = cfqg;
2210
2211        /* Restore the workload type data */
2212        if (cfqg->saved_workload_slice) {
2213                cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2214                cfqd->serving_type = cfqg->saved_workload;
2215                cfqd->serving_prio = cfqg->saved_serving_prio;
2216        } else
2217                cfqd->workload_expires = jiffies - 1;
2218
2219        choose_service_tree(cfqd, cfqg);
2220}
2221
2222/*
2223 * Select a queue for service. If we have a current active queue,
2224 * check whether to continue servicing it, or retrieve and set a new one.
2225 */
2226static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2227{
2228        struct cfq_queue *cfqq, *new_cfqq = NULL;
2229
2230        cfqq = cfqd->active_queue;
2231        if (!cfqq)
2232                goto new_queue;
2233
2234        if (!cfqd->rq_queued)
2235                return NULL;
2236
2237        /*
2238         * We were waiting for the group to get backlogged. Expire the queue
2239         */
2240        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2241                goto expire;
2242
2243        /*
2244         * The active queue has run out of time, expire it and select new.
2245         */
2246        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2247                /*
2248                 * If slice had not expired at the completion of last request
2249                 * we might not have turned on wait_busy flag. Don't expire
2250                 * the queue yet. Allow the group to get backlogged.
2251                 *
2252                 * The very fact that we have used up the slice means we
2253                 * have been idling all along on this queue, and it should be
2254                 * ok to wait for this request to complete.
2255                 */
2256                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2257                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2258                        cfqq = NULL;
2259                        goto keep_queue;
2260                } else
2261                        goto check_group_idle;
2262        }
2263
2264        /*
2265         * The active queue has requests and isn't expired, allow it to
2266         * dispatch.
2267         */
2268        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2269                goto keep_queue;
2270
2271        /*
2272         * If another queue has a request waiting within our mean seek
2273         * distance, let it run.  The expire code will check for close
2274         * cooperators and put the close queue at the front of the service
2275         * tree.  If possible, merge the expiring queue with the new cfqq.
2276         */
2277        new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2278        if (new_cfqq) {
2279                if (!cfqq->new_cfqq)
2280                        cfq_setup_merge(cfqq, new_cfqq);
2281                goto expire;
2282        }
2283
2284        /*
2285         * No requests pending. If the active queue still has requests in
2286         * flight or is idling for a new request, allow either of these
2287         * conditions to happen (or time out) before selecting a new queue.
2288         */
2289        if (timer_pending(&cfqd->idle_slice_timer)) {
2290                cfqq = NULL;
2291                goto keep_queue;
2292        }
2293
2294        /*
2295         * This is a deep seek queue, but the device is much faster than
2296         * the queue can deliver; don't idle.
2297         */
2298        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2299            (cfq_cfqq_slice_new(cfqq) ||
2300            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2301                cfq_clear_cfqq_deep(cfqq);
2302                cfq_clear_cfqq_idle_window(cfqq);
2303        }
2304
2305        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2306                cfqq = NULL;
2307                goto keep_queue;
2308        }
2309
2310        /*
2311         * If group idle is enabled and there are requests dispatched from
2312         * this group, wait for requests to complete.
2313         */
2314check_group_idle:
2315        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2316            && cfqq->cfqg->dispatched) {
2317                cfqq = NULL;
2318                goto keep_queue;
2319        }
2320
2321expire:
2322        cfq_slice_expired(cfqd, 0);
2323new_queue:
2324        /*
2325         * Current queue expired. Check if we have to switch to a new
2326         * service tree
2327         */
2328        if (!new_cfqq)
2329                cfq_choose_cfqg(cfqd);
2330
2331        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2332keep_queue:
2333        return cfqq;
2334}
2335
2336static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2337{
2338        int dispatched = 0;
2339
2340        while (cfqq->next_rq) {
2341                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2342                dispatched++;
2343        }
2344
2345        BUG_ON(!list_empty(&cfqq->fifo));
2346
2347        /* By default cfqq is not expired if it is empty. Do it explicitly */
2348        __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2349        return dispatched;
2350}
2351
2352/*
2353 * Drain our current requests. Used for barriers and when switching
2354 * io schedulers on-the-fly.
2355 */
2356static int cfq_forced_dispatch(struct cfq_data *cfqd)
2357{
2358        struct cfq_queue *cfqq;
2359        int dispatched = 0;
2360
2361        /* Expire the timeslice of the current active queue first */
2362        cfq_slice_expired(cfqd, 0);
2363        while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2364                __cfq_set_active_queue(cfqd, cfqq);
2365                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2366        }
2367
2368        BUG_ON(cfqd->busy_queues);
2369
2370        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2371        return dispatched;
2372}
2373
2374static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2375        struct cfq_queue *cfqq)
2376{
2377        /* the queue hasn't finished any request, can't estimate */
2378        if (cfq_cfqq_slice_new(cfqq))
2379                return true;
2380        if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2381                cfqq->slice_end))
2382                return true;
2383
2384        return false;
2385}
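
    /*
     * The estimate above assumes each in-driver request costs roughly
     * one cfq_slice_idle to complete: with N requests dispatched, if
     * jiffies + N * cfq_slice_idle already passes slice_end, the slice
     * will likely be exhausted before further dispatches could finish.
     */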
2386
2387static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2388{
2389        unsigned int max_dispatch;
2390
2391        /*
2392         * Drain async requests before we start sync IO
2393         */
2394        if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2395                return false;
2396
2397        /*
2398         * If this is an async queue and we have sync IO in flight, let it wait
2399         */
2400        if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2401                return false;
2402
2403        max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2404        if (cfq_class_idle(cfqq))
2405                max_dispatch = 1;
2406
2407        /*
2408         * Does this cfqq already have too much IO in flight?
2409         */
2410        if (cfqq->dispatched >= max_dispatch) {
2411                bool promote_sync = false;
2412                /*
2413                 * idle queue must always only have a single IO in flight
2414                 */
2415                if (cfq_class_idle(cfqq))
2416                        return false;
2417
2418                /*
2419                 * If there is only one sync queue, we can ignore the async
2420                 * queues here and give the sync queue no dispatch limit,
2421                 * since a sync queue can preempt an async queue anyway;
2422                 * limiting the sync queue doesn't make sense. This is
2423                 * useful for the aiostress test.
2424                 */
2425                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2426                        promote_sync = true;
2427
2428                /*
2429                 * We have other queues, don't allow more IO from this one
2430                 */
2431                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2432                                !promote_sync)
2433                        return false;
2434
2435                /*
2436                 * Sole queue user, no limit
2437                 */
2438                if (cfqd->busy_queues == 1 || promote_sync)
2439                        max_dispatch = -1;
2440                else
2441                        /*
2442                         * Normally we start throttling cfqq when cfq_quantum/2
2443                         * requests have been dispatched. But we can drive
2444                         * deeper queue depths at the beginning of the slice,
2445                         * subject to the upper limit of cfq_quantum.
2446                         */
2447                        max_dispatch = cfqd->cfq_quantum;
2448        }
2449
2450        /*
2451         * Async queues must wait a bit before being allowed dispatch.
2452         * We also ramp up the dispatch depth gradually for async IO,
2453         * based on the last sync IO we serviced
2454         */
2455        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2456                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2457                unsigned int depth;
2458
2459                depth = last_sync / cfqd->cfq_slice[1];
2460                if (!depth && !cfqq->dispatched)
2461                        depth = 1;
2462                if (depth < max_dispatch)
2463                        max_dispatch = depth;
2464        }
2465
2466        /*
2467         * If we're below the current max, allow a dispatch
2468         */
2469        return cfqq->dispatched < max_dispatch;
2470}
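
    /*
     * The async ramp-up above, by example: if the last delayed sync I/O
     * completed three sync-slice lengths ago (last_sync / cfq_slice[1]
     * == 3), async dispatch depth is capped at 3. Right after a sync
     * request the cap computes to 0 (bumped to 1 only if nothing is in
     * flight yet), so async I/O cannot starve interactive sync traffic.
     */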
2471
2472/*
2473 * Dispatch a request from cfqq, moving it to the request queue
2474 * dispatch list.
2475 */
2476static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2477{
2478        struct request *rq;
2479
2480        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2481
2482        if (!cfq_may_dispatch(cfqd, cfqq))
2483                return false;
2484
2485        /*
2486         * follow expired path, else get first next available
2487         */
2488        rq = cfq_check_fifo(cfqq);
2489        if (!rq)
2490                rq = cfqq->next_rq;
2491
2492        /*
2493         * insert request into driver dispatch list
2494         */
2495        cfq_dispatch_insert(cfqd->queue, rq);
2496
2497        if (!cfqd->active_cic) {
2498                struct cfq_io_context *cic = RQ_CIC(rq);
2499
2500                atomic_long_inc(&cic->ioc->refcount);
2501                cfqd->active_cic = cic;
2502        }
2503
2504        return true;
2505}
2506
2507/*
2508 * Find the cfqq that we need to service and move a request from that to the
2509 * dispatch list
2510 */
2511static int cfq_dispatch_requests(struct request_queue *q, int force)
2512{
2513        struct cfq_data *cfqd = q->elevator->elevator_data;
2514        struct cfq_queue *cfqq;
2515
2516        if (!cfqd->busy_queues)
2517                return 0;
2518
2519        if (unlikely(force))
2520                return cfq_forced_dispatch(cfqd);
2521
2522        cfqq = cfq_select_queue(cfqd);
2523        if (!cfqq)
2524                return 0;
2525
2526        /*
2527         * Dispatch a request from this cfqq, if it is allowed
2528         */
2529        if (!cfq_dispatch_request(cfqd, cfqq))
2530                return 0;
2531
2532        cfqq->slice_dispatch++;
2533        cfq_clear_cfqq_must_dispatch(cfqq);
2534
2535        /*
2536         * expire an async queue immediately if it has used up its slice. An
2537         * idle queue always expires after 1 dispatch round.
2538         */
2539        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2540            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2541            cfq_class_idle(cfqq))) {
2542                cfqq->slice_end = jiffies + 1;
2543                cfq_slice_expired(cfqd, 0);
2544        }
2545
2546        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2547        return 1;
2548}
2549
2550/*
2551 * task holds one reference to the queue, dropped when task exits. each rq
2552 * in-flight on this queue also holds a reference, dropped when rq is freed.
2553 *
2554 * Each cfq queue took a reference on the parent group. Drop it now.
2555 * queue lock must be held here.
2556 */
2557static void cfq_put_queue(struct cfq_queue *cfqq)
2558{
2559        struct cfq_data *cfqd = cfqq->cfqd;
2560        struct cfq_group *cfqg;
2561
2562        BUG_ON(cfqq->ref <= 0);
2563
2564        cfqq->ref--;
2565        if (cfqq->ref)
2566                return;
2567
2568        cfq_log_cfqq(cfqd, cfqq, "put_queue");
2569        BUG_ON(rb_first(&cfqq->sort_list));
2570        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2571        cfqg = cfqq->cfqg;
2572
2573        if (unlikely(cfqd->active_queue == cfqq)) {
2574                __cfq_slice_expired(cfqd, cfqq, 0);
2575                cfq_schedule_dispatch(cfqd);
2576        }
2577
2578        BUG_ON(cfq_cfqq_on_rr(cfqq));
2579        kmem_cache_free(cfq_pool, cfqq);
2580        cfq_put_cfqg(cfqg);
2581}
2582
2583/*
2584 * Call func for each cic attached to this ioc.
2585 */
2586static void
2587call_for_each_cic(struct io_context *ioc,
2588                  void (*func)(struct io_context *, struct cfq_io_context *))
2589{
2590        struct cfq_io_context *cic;
2591        struct hlist_node *n;
2592
2593        rcu_read_lock();
2594
2595        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2596                func(ioc, cic);
2597
2598        rcu_read_unlock();
2599}
2600
2601static void cfq_cic_free_rcu(struct rcu_head *head)
2602{
2603        struct cfq_io_context *cic;
2604
2605        cic = container_of(head, struct cfq_io_context, rcu_head);
2606
2607        kmem_cache_free(cfq_ioc_pool, cic);
2608        elv_ioc_count_dec(cfq_ioc_count);
2609
2610        if (ioc_gone) {
2611                /*
2612                 * CFQ scheduler is exiting, grab exit lock and check
2613                 * the pending io context count. If it hits zero,
2614                 * complete ioc_gone and set it back to NULL
2615                 */
2616                spin_lock(&ioc_gone_lock);
2617                if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2618                        complete(ioc_gone);
2619                        ioc_gone = NULL;
2620                }
2621                spin_unlock(&ioc_gone_lock);
2622        }
2623}
2624
2625static void cfq_cic_free(struct cfq_io_context *cic)
2626{
2627        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2628}
2629
2630static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2631{
2632        unsigned long flags;
2633        unsigned long dead_key = (unsigned long) cic->key;
2634
2635        BUG_ON(!(dead_key & CIC_DEAD_KEY));
2636
2637        spin_lock_irqsave(&ioc->lock, flags);
2638        radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2639        hlist_del_rcu(&cic->cic_list);
2640        spin_unlock_irqrestore(&ioc->lock, flags);
2641
2642        cfq_cic_free(cic);
2643}
2644
2645/*
2646 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2647 * There are only two callers of this: ->dtor(), which is called with
2648 * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
2649 */
2650static void cfq_free_io_context(struct io_context *ioc)
2651{
2652        /*
2653         * ioc->refcount is zero here, or we are called from elv_unregister(),
2654         * so no more cic's are allowed to be linked into this ioc.  So it
2655         * should be ok to iterate over the known list, we will see all cic's
2656         * since no new ones are added.
2657         */
2658        call_for_each_cic(ioc, cic_free_func);
2659}
2660
2661static void cfq_put_cooperator(struct cfq_queue *cfqq)
2662{
2663        struct cfq_queue *__cfqq, *next;
2664
2665        /*
2666         * If this queue was scheduled to merge with another queue, be
2667         * sure to drop the reference taken on that queue (and others in
2668         * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2669         */
2670        __cfqq = cfqq->new_cfqq;
2671        while (__cfqq) {
2672                if (__cfqq == cfqq) {
2673                        WARN(1, "cfqq->new_cfqq loop detected\n");
2674                        break;
2675                }
2676                next = __cfqq->new_cfqq;
2677                cfq_put_queue(__cfqq);
2678                __cfqq = next;
2679        }
2680}
2681
2682static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2683{
2684        if (unlikely(cfqq == cfqd->active_queue)) {
2685                __cfq_slice_expired(cfqd, cfqq, 0);
2686                cfq_schedule_dispatch(cfqd);
2687        }
2688
2689        cfq_put_cooperator(cfqq);
2690
2691        cfq_put_queue(cfqq);
2692}
2693
2694static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2695                                         struct cfq_io_context *cic)
2696{
2697        struct io_context *ioc = cic->ioc;
2698
2699        list_del_init(&cic->queue_list);
2700
2701        /*
2702         * Make sure dead mark is seen for dead queues
2703         */
2704        smp_wmb();
2705        cic->key = cfqd_dead_key(cfqd);
2706
2707        rcu_read_lock();
2708        if (rcu_dereference(ioc->ioc_data) == cic) {
2709                rcu_read_unlock();
2710                spin_lock(&ioc->lock);
2711                rcu_assign_pointer(ioc->ioc_data, NULL);
2712                spin_unlock(&ioc->lock);
2713        } else
2714                rcu_read_unlock();
2715
2716        if (cic->cfqq[BLK_RW_ASYNC]) {
2717                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2718                cic->cfqq[BLK_RW_ASYNC] = NULL;
2719        }
2720
2721        if (cic->cfqq[BLK_RW_SYNC]) {
2722                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2723                cic->cfqq[BLK_RW_SYNC] = NULL;
2724        }
2725}
2726
2727static void cfq_exit_single_io_context(struct io_context *ioc,
2728                                       struct cfq_io_context *cic)
2729{
2730        struct cfq_data *cfqd = cic_to_cfqd(cic);
2731
2732        if (cfqd) {
2733                struct request_queue *q = cfqd->queue;
2734                unsigned long flags;
2735
2736                spin_lock_irqsave(q->queue_lock, flags);
2737
2738                /*
2739                 * Ensure we get a fresh copy of the ->key to prevent
2740                 * race between exiting task and queue
2741                 */
2742                smp_read_barrier_depends();
2743                if (cic->key == cfqd)
2744                        __cfq_exit_single_io_context(cfqd, cic);
2745
2746                spin_unlock_irqrestore(q->queue_lock, flags);
2747        }
2748}
2749
2750/*
2751 * The process that ioc belongs to has exited; we need to clean up
2752 * and put the internal structures we have that belong to that process.
2753 */
2754static void cfq_exit_io_context(struct io_context *ioc)
2755{
2756        call_for_each_cic(ioc, cfq_exit_single_io_context);
2757}
2758
2759static struct cfq_io_context *
2760cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2761{
2762        struct cfq_io_context *cic;
2763
2764        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2765                                                        cfqd->queue->node);
2766        if (cic) {
2767                cic->last_end_request = jiffies;
2768                INIT_LIST_HEAD(&cic->queue_list);
2769                INIT_HLIST_NODE(&cic->cic_list);
2770                cic->dtor = cfq_free_io_context;
2771                cic->exit = cfq_exit_io_context;
2772                elv_ioc_count_inc(cfq_ioc_count);
2773        }
2774
2775        return cic;
2776}
2777
2778static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2779{
2780        struct task_struct *tsk = current;
2781        int ioprio_class;
2782
2783        if (!cfq_cfqq_prio_changed(cfqq))
2784                return;
2785
2786        ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2787        switch (ioprio_class) {
2788        default:
2789                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
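                    /* fall through */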
2790        case IOPRIO_CLASS_NONE:
2791                /*
2792                 * no prio set, inherit CPU scheduling settings
2793                 */
2794                cfqq->ioprio = task_nice_ioprio(tsk);
2795                cfqq->ioprio_class = task_nice_ioclass(tsk);
2796                break;
2797        case IOPRIO_CLASS_RT:
2798                cfqq->ioprio = task_ioprio(ioc);
2799                cfqq->ioprio_class = IOPRIO_CLASS_RT;
2800                break;
2801        case IOPRIO_CLASS_BE:
2802                cfqq->ioprio = task_ioprio(ioc);
2803                cfqq->ioprio_class = IOPRIO_CLASS_BE;
2804                break;
2805        case IOPRIO_CLASS_IDLE:
2806                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2807                cfqq->ioprio = 7;
2808                cfq_clear_cfqq_idle_window(cfqq);
2809                break;
2810        }
2811
2812        /*
2813         * keep track of original prio settings in case we have to temporarily
2814         * elevate the priority of this queue
2815         */
2816        cfqq->org_ioprio = cfqq->ioprio;
2817        cfqq->org_ioprio_class = cfqq->ioprio_class;
2818        cfq_clear_cfqq_prio_changed(cfqq);
2819}
2820
2821static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2822{
2823        struct cfq_data *cfqd = cic_to_cfqd(cic);
2824        struct cfq_queue *cfqq;
2825        unsigned long flags;
2826
2827        if (unlikely(!cfqd))
2828                return;
2829
2830        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2831
2832        cfqq = cic->cfqq[BLK_RW_ASYNC];
2833        if (cfqq) {
2834                struct cfq_queue *new_cfqq;
2835                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2836                                                GFP_ATOMIC);
2837                if (new_cfqq) {
2838                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2839                        cfq_put_queue(cfqq);
2840                }
2841        }
2842
2843        cfqq = cic->cfqq[BLK_RW_SYNC];
2844        if (cfqq)
2845                cfq_mark_cfqq_prio_changed(cfqq);
2846
2847        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2848}
2849
2850static void cfq_ioc_set_ioprio(struct io_context *ioc)
2851{
2852        call_for_each_cic(ioc, changed_ioprio);
2853        ioc->ioprio_changed = 0;
2854}
2855
2856static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2857                          pid_t pid, bool is_sync)
2858{
2859        RB_CLEAR_NODE(&cfqq->rb_node);
2860        RB_CLEAR_NODE(&cfqq->p_node);
2861        INIT_LIST_HEAD(&cfqq->fifo);
2862
2863        cfqq->ref = 0;
2864        cfqq->cfqd = cfqd;
2865
2866        cfq_mark_cfqq_prio_changed(cfqq);
2867
2868        if (is_sync) {
2869                if (!cfq_class_idle(cfqq))
2870                        cfq_mark_cfqq_idle_window(cfqq);
2871                cfq_mark_cfqq_sync(cfqq);
2872        }
2873        cfqq->pid = pid;
2874}
2875
2876#ifdef CONFIG_CFQ_GROUP_IOSCHED
2877static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2878{
2879        struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2880        struct cfq_data *cfqd = cic_to_cfqd(cic);
2881        unsigned long flags;
2882        struct request_queue *q;
2883
2884        if (unlikely(!cfqd))
2885                return;
2886
2887        q = cfqd->queue;
2888
2889        spin_lock_irqsave(q->queue_lock, flags);
2890
2891        if (sync_cfqq) {
2892                /*
2893                 * Drop reference to sync queue. A new sync queue will be
2894                 * assigned in new group upon arrival of a fresh request.
2895                 */
2896                cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2897                cic_set_cfqq(cic, NULL, 1);
2898                cfq_put_queue(sync_cfqq);
2899        }
2900
2901        spin_unlock_irqrestore(q->queue_lock, flags);
2902}
2903
2904static void cfq_ioc_set_cgroup(struct io_context *ioc)
2905{
2906        call_for_each_cic(ioc, changed_cgroup);
2907        ioc->cgroup_changed = 0;
2908}
2909#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2910
2911static struct cfq_queue *
2912cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2913                     struct io_context *ioc, gfp_t gfp_mask)
2914{
2915        struct cfq_queue *cfqq, *new_cfqq = NULL;
2916        struct cfq_io_context *cic;
2917        struct cfq_group *cfqg;
2918
2919retry:
2920        cfqg = cfq_get_cfqg(cfqd, 1);
2921        cic = cfq_cic_lookup(cfqd, ioc);
2922        /* cic always exists here */
2923        cfqq = cic_to_cfqq(cic, is_sync);
2924
2925        /*
2926         * Always try a new alloc if we fell back to the OOM cfqq
2927         * originally, since it should just be a temporary situation.
2928         */
2929        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2930                cfqq = NULL;
2931                if (new_cfqq) {
2932                        cfqq = new_cfqq;
2933                        new_cfqq = NULL;
2934                } else if (gfp_mask & __GFP_WAIT) {
2935                        spin_unlock_irq(cfqd->queue->queue_lock);
2936                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
2937                                        gfp_mask | __GFP_ZERO,
2938                                        cfqd->queue->node);
2939                        spin_lock_irq(cfqd->queue->queue_lock);
2940                        if (new_cfqq)
2941                                goto retry;
2942                } else {
2943                        cfqq = kmem_cache_alloc_node(cfq_pool,
2944                                        gfp_mask | __GFP_ZERO,
2945                                        cfqd->queue->node);
2946                }
2947
2948                if (cfqq) {
2949                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2950                        cfq_init_prio_data(cfqq, ioc);
2951                        cfq_link_cfqq_cfqg(cfqq, cfqg);
2952                        cfq_log_cfqq(cfqd, cfqq, "alloced");
2953                } else
2954                        cfqq = &cfqd->oom_cfqq;
2955        }
2956
2957        if (new_cfqq)
2958                kmem_cache_free(cfq_pool, new_cfqq);
2959
2960        return cfqq;
2961}
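
    /*
     * The retry loop above is the usual unlock-to-allocate pattern: a
     * blocking (__GFP_WAIT) allocation must drop queue_lock, and by the
     * time the lock is re-taken another task may have installed a cfqq,
     * so the cic is looked up again and the spare allocation is freed
     * if it went unused. Without __GFP_WAIT we allocate atomically and
     * fall back to the embedded oom_cfqq on failure, so this function
     * never returns NULL.
     */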
2962
2963static struct cfq_queue **
2964cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2965{
2966        switch (ioprio_class) {
2967        case IOPRIO_CLASS_RT:
2968                return &cfqd->async_cfqq[0][ioprio];
2969        case IOPRIO_CLASS_BE:
2970                return &cfqd->async_cfqq[1][ioprio];
2971        case IOPRIO_CLASS_IDLE:
2972                return &cfqd->async_idle_cfqq;
2973        default:
2974                BUG();
2975        }
2976}
2977
2978static struct cfq_queue *
2979cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2980              gfp_t gfp_mask)
2981{
2982        const int ioprio = task_ioprio(ioc);
2983        const int ioprio_class = task_ioprio_class(ioc);
2984        struct cfq_queue **async_cfqq = NULL;
2985        struct cfq_queue *cfqq = NULL;
2986
2987        if (!is_sync) {
2988                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2989                cfqq = *async_cfqq;
2990        }
2991
2992        if (!cfqq)
2993                cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2994
2995        /*
2996         * pin the queue now that it's allocated; scheduler exit will prune it
2997         */
2998        if (!is_sync && !(*async_cfqq)) {
2999                cfqq->ref++;
3000                *async_cfqq = cfqq;
3001        }
3002
3003        cfqq->ref++;
3004        return cfqq;
3005}
3006
3007/*
3008 * We drop cfq io contexts lazily, so we may find a dead one.
3009 */
3010static void
3011cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
3012                  struct cfq_io_context *cic)
3013{
3014        unsigned long flags;
3015
3016        WARN_ON(!list_empty(&cic->queue_list));
3017        BUG_ON(cic->key != cfqd_dead_key(cfqd));
3018
3019        spin_lock_irqsave(&ioc->lock, flags);
3020
3021        BUG_ON(rcu_dereference_check(ioc->ioc_data,
3022                lockdep_is_held(&ioc->lock)) == cic);
3023
3024        radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3025        hlist_del_rcu(&cic->cic_list);
3026        spin_unlock_irqrestore(&ioc->lock, flags);
3027
3028        cfq_cic_free(cic);
3029}
3030
3031static struct cfq_io_context *
3032cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3033{
3034        struct cfq_io_context *cic;
3035        unsigned long flags;
3036
3037        if (unlikely(!ioc))
3038                return NULL;
3039
3040        rcu_read_lock();
3041
3042        /*
3043         * we maintain a last-hit cache, to avoid browsing over the tree
3044         */
3045        cic = rcu_dereference(ioc->ioc_data);
3046        if (cic && cic->key == cfqd) {
3047                rcu_read_unlock();
3048                return cic;
3049        }
3050
3051        do {
3052                cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3053                rcu_read_unlock();
3054                if (!cic)
3055                        break;
3056                if (unlikely(cic->key != cfqd)) {
3057                        cfq_drop_dead_cic(cfqd, ioc, cic);
3058                        rcu_read_lock();
3059                        continue;
3060                }
3061
3062                spin_lock_irqsave(&ioc->lock, flags);
3063                rcu_assign_pointer(ioc->ioc_data, cic);
3064                spin_unlock_irqrestore(&ioc->lock, flags);
3065                break;
3066        } while (1);
3067
3068        return cic;
3069}
3070
3071/*
3072 * Add cic into ioc, using cfqd as the search key. This enables us to look up
3073 * the process specific cfq io context when entered from the block layer.
3074 * Also adds the cic to a per-cfqd list, used when this queue is removed.
3075 */
3076static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3077                        struct cfq_io_context *cic, gfp_t gfp_mask)
3078{
3079        unsigned long flags;
3080        int ret;
3081
3082        ret = radix_tree_preload(gfp_mask);
3083        if (!ret) {
3084                cic->ioc = ioc;
3085                cic->key = cfqd;
3086
3087                spin_lock_irqsave(&ioc->lock, flags);
3088                ret = radix_tree_insert(&ioc->radix_root,
3089                                                cfqd->cic_index, cic);
3090                if (!ret)
3091                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3092                spin_unlock_irqrestore(&ioc->lock, flags);
3093
3094                radix_tree_preload_end();
3095
3096                if (!ret) {
3097                        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3098                        list_add(&cic->queue_list, &cfqd->cic_list);
3099                        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3100                }
3101        }
3102
3103        if (ret)
3104                printk(KERN_ERR "cfq: cic link failed!\n");
3105
3106        return ret;
3107}
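
    /*
     * Note the allocation pattern above: radix_tree_preload() charges
     * the per-CPU node reserve up front (and disables preemption), so
     * the radix_tree_insert() under ioc->lock cannot need to allocate
     * and thus cannot sleep; radix_tree_preload_end() re-enables
     * preemption once the insert is done.
     */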
3108
3109/*
3110 * Set up the general io context and cfq io context. There can be several cfq
3111 * io contexts per general io context, if this process is doing io to more
3112 * than one device managed by cfq.
3113 */
3114static struct cfq_io_context *
3115cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3116{
3117        struct io_context *ioc = NULL;
3118        struct cfq_io_context *cic;
3119
3120        might_sleep_if(gfp_mask & __GFP_WAIT);
3121
3122        ioc = get_io_context(gfp_mask, cfqd->queue->node);
3123        if (!ioc)
3124                return NULL;
3125
3126        cic = cfq_cic_lookup(cfqd, ioc);
3127        if (cic)
3128                goto out;
3129
3130        cic = cfq_alloc_io_context(cfqd, gfp_mask);
3131        if (cic == NULL)
3132                goto err;
3133
3134        if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3135                goto err_free;
3136
3137out:
3138        smp_read_barrier_depends();
3139        if (unlikely(ioc->ioprio_changed))
3140                cfq_ioc_set_ioprio(ioc);
3141
3142#ifdef CONFIG_CFQ_GROUP_IOSCHED
3143        if (unlikely(ioc->cgroup_changed))
3144                cfq_ioc_set_cgroup(ioc);
3145#endif
3146        return cic;
3147err_free:
3148        cfq_cic_free(cic);
3149err:
3150        put_io_context(ioc);
3151        return NULL;
3152}
3153
3154static void
3155cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3156{
3157        unsigned long elapsed = jiffies - cic->last_end_request;
3158        unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3159
3160        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3161        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3162        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3163}
3164
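/*
 * The update above is a fixed-point exponentially weighted moving
 * average with a 7/8 decay and a scale factor of 256. At steady state
 * with a constant think time T:
 *
 *	samples = (7*samples + 256)/8    converges to 256
 *	total   = (7*total + 256*T)/8    converges to 256*T
 *	mean    = (total + 128)/samples  converges to T (the +128 rounds)
 *
 * e.g. a steady T of 4 jiffies gives samples -> 256, total -> 1024 and
 * ttime_mean -> 4.
 */
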
3165static void
3166cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3167                       struct request *rq)
3168{
3169        sector_t sdist = 0;
3170        sector_t n_sec = blk_rq_sectors(rq);
3171        if (cfqq->last_request_pos) {
3172                if (cfqq->last_request_pos < blk_rq_pos(rq))
3173                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3174                else
3175                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3176        }
3177
3178        cfqq->seek_history <<= 1;
3179        if (blk_queue_nonrot(cfqd->queue))
3180                cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3181        else
3182                cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3183}
3184
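/*
 * seek_history is a 32-bit shift register: each new request shifts in
 * one bit, set if the request looked seeky (a large seek distance on
 * rotational media; a too-small request on non-rotational media, where
 * request size rather than seek distance limits throughput). The
 * CFQQ_SEEKY() classification then reduces to a population count over
 * the last 32 requests, e.g. (illustrative value):
 *
 *	seek_history = 0x00000029;	(hweight32() == 3, i.e. 3 of the
 *					 last 32 requests were seeky)
 */
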
3185/*
3186 * Disable the idle window if the process thinks too long or seeks so much
3187 * that idling doesn't pay off
3188 */
3189static void
3190cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3191                       struct cfq_io_context *cic)
3192{
3193        int old_idle, enable_idle;
3194
3195        /*
3196         * Don't idle for async or idle io prio class
3197         */
3198        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3199                return;
3200
3201        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3202
3203        if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3204                cfq_mark_cfqq_deep(cfqq);
3205
3206        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3207                enable_idle = 0;
3208        else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3209            (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3210                enable_idle = 0;
3211        else if (sample_valid(cic->ttime_samples)) {
3212                if (cic->ttime_mean > cfqd->cfq_slice_idle)
3213                        enable_idle = 0;
3214                else
3215                        enable_idle = 1;
3216        }
3217
3218        if (old_idle != enable_idle) {
3219                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3220                if (enable_idle)
3221                        cfq_mark_cfqq_idle_window(cfqq);
3222                else
3223                        cfq_clear_cfqq_idle_window(cfqq);
3224        }
3225}
3226
3227/*
3228 * Check if new_cfqq should preempt the currently active queue. Returns
3229 * false for no (or if we aren't sure); true will cause a preempt.
3230 */
3231static bool
3232cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3233                   struct request *rq)
3234{
3235        struct cfq_queue *cfqq;
3236
3237        cfqq = cfqd->active_queue;
3238        if (!cfqq)
3239                return false;
3240
3241        if (cfq_class_idle(new_cfqq))
3242                return false;
3243
3244        if (cfq_class_idle(cfqq))
3245                return true;
3246
3247        /*
3248         * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3249         */
3250        if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3251                return false;
3252
3253        /*
3254         * if the new request is sync, but the currently running queue is
3255         * not, let the sync request have priority.
3256         */
3257        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3258                return true;
3259
3260        if (new_cfqq->cfqg != cfqq->cfqg)
3261                return false;
3262
3263        if (cfq_slice_used(cfqq))
3264                return true;
3265
3266        /* Allow preemption only if we are idling on sync-noidle tree */
3267        if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3268            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3269            new_cfqq->service_tree->count == 2 &&
3270            RB_EMPTY_ROOT(&cfqq->sort_list))
3271                return true;
3272
3273        /*
3274         * So both queues are sync. Let the new request get disk time if
3275         * it's a metadata request and the current queue is doing regular IO.
3276         */
3277        if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3278                return true;
3279
3280        /*
3281         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3282         */
3283        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3284                return true;
3285
3286        /* The active queue is empty but is not one we should idle on */
3287        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3288                return true;
3289
3290        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3291                return false;
3292
3293        /*
3294         * if this request is as good as one we would expect from the
3295         * current cfqq, let it preempt
3296         */
3297        if (cfq_rq_close(cfqd, cfqq, rq))
3298                return true;
3299
3300        return false;
3301}
3302
3303/*
3304 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3305 * let it have half of its nominal slice.
3306 */
3307static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3308{
3309        struct cfq_queue *old_cfqq = cfqd->active_queue;
3310
3311        cfq_log_cfqq(cfqd, cfqq, "preempt");
3312        cfq_slice_expired(cfqd, 1);
3313
3314        /*
3315         * if the workload type changed, don't save the slice, otherwise the
3316         * preemption doesn't take effect
3317         */
3318        if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
3319                cfqq->cfqg->saved_workload_slice = 0;
3320
3321        /*
3322         * Put the new queue at the front of the current list,
3323         * so we know that it will be selected next.
3324         */
3325        BUG_ON(!cfq_cfqq_on_rr(cfqq));
3326
3327        cfq_service_tree_add(cfqd, cfqq, 1);
3328
3329        cfqq->slice_end = 0;
3330        cfq_mark_cfqq_slice_new(cfqq);
3331}
3332
3333/*
3334 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3335 * something we should do about it
3336 */
3337static void
3338cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3339                struct request *rq)
3340{
3341        struct cfq_io_context *cic = RQ_CIC(rq);
3342
3343        cfqd->rq_queued++;
3344        if (rq->cmd_flags & REQ_META)
3345                cfqq->meta_pending++;
3346
3347        cfq_update_io_thinktime(cfqd, cic);
3348        cfq_update_io_seektime(cfqd, cfqq, rq);
3349        cfq_update_idle_window(cfqd, cfqq, cic);
3350
3351        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3352
3353        if (cfqq == cfqd->active_queue) {
3354                /*
3355                 * Remember that we saw a request from this process, but
3356                 * don't start queuing just yet. Otherwise we risk seeing lots
3357                 * of tiny requests, because we disrupt the normal plugging
3358                 * and merging. If the request is already larger than a single
3359                 * page, let it rip immediately. For that case we assume that
3360                 * merging is already done. Ditto for a busy system with
3361                 * other work pending: don't risk waiting for the idle timer
3362                 * unplug before continuing to work.
3363                 */
3364                if (cfq_cfqq_wait_request(cfqq)) {
3365                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3366                            cfqd->busy_queues > 1) {
3367                                cfq_del_timer(cfqd, cfqq);
3368                                cfq_clear_cfqq_wait_request(cfqq);
3369                                __blk_run_queue(cfqd->queue);
3370                        } else {
3371                                cfq_blkiocg_update_idle_time_stats(
3372                                                &cfqq->cfqg->blkg);
3373                                cfq_mark_cfqq_must_dispatch(cfqq);
3374                        }
3375                }
3376        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3377                /*
3378                 * not the active queue - expire the current slice if it is
3379                 * idle and has exceeded its mean thinktime, or if this new queue
3380                 * has some old slice time left and is of higher priority, or if
3381                 * this new queue is RT and the current one is BE
3382                 */
3383                cfq_preempt_queue(cfqd, cfqq);
3384                __blk_run_queue(cfqd->queue);
3385        }
3386}
3387
3388static void cfq_insert_request(struct request_queue *q, struct request *rq)
3389{
3390        struct cfq_data *cfqd = q->elevator->elevator_data;
3391        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3392
3393        cfq_log_cfqq(cfqd, cfqq, "insert_request");
3394        cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3395
3396        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3397        list_add_tail(&rq->queuelist, &cfqq->fifo);
3398        cfq_add_rq_rb(rq);
3399        cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3400                        &cfqd->serving_group->blkg, rq_data_dir(rq),
3401                        rq_is_sync(rq));
3402        cfq_rq_enqueued(cfqd, cfqq, rq);
3403}
3404
3405/*
3406 * Update hw_tag based on peak queue depth over 50 samples under
3407 * sufficient load.
3408 */
3409static void cfq_update_hw_tag(struct cfq_data *cfqd)
3410{
3411        struct cfq_queue *cfqq = cfqd->active_queue;
3412
3413        if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3414                cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3415
3416        if (cfqd->hw_tag == 1)
3417                return;
3418
3419        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3420            cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3421                return;
3422
3423        /*
3424         * If the active queue doesn't have enough requests and can idle, cfq
3425         * might not dispatch sufficient requests to the hardware. Don't zero
3426         * hw_tag in this case
3427         */
3428        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3429            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3430            CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3431                return;
3432
3433        if (cfqd->hw_tag_samples++ < 50)
3434                return;
3435
3436        if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3437                cfqd->hw_tag = 1;
3438        else
3439                cfqd->hw_tag = 0;
3440}
3441
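/*
 * Worked example for the estimator above (hypothetical devices): on an
 * NCQ-capable disk the driver depth may peak at, say, 31 within the
 * first 50 samples, so hw_tag_est_depth ends up >= CFQ_HW_QUEUE_MIN and
 * hw_tag becomes 1. On a queue-depth-1 device rq_in_driver never
 * exceeds 1, the estimate stays below CFQ_HW_QUEUE_MIN, and hw_tag
 * becomes 0.
 */
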
3442static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3443{
3444        struct cfq_io_context *cic = cfqd->active_cic;
3445
3446        /* If the queue already has requests, don't wait */
3447        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3448                return false;
3449
3450        /* If there are other queues in the group, don't wait */
3451        if (cfqq->cfqg->nr_cfqq > 1)
3452                return false;
3453
3454        if (cfq_slice_used(cfqq))
3455                return true;
3456
3457        /* if the remaining slice is less than the think time, wait busy */
3458        if (cic && sample_valid(cic->ttime_samples)
3459            && (cfqq->slice_end - jiffies < cic->ttime_mean))
3460                return true;
3461
3462        /*
3463         * If the think time is less than a jiffy, then ttime_mean=0 and the
3464         * above will not be true. It might happen that the slice has not
3465         * expired yet but will expire soon during select_queue(). To cover
3466         * the case where the think time is less than a jiffy, mark the queue
3467         * wait busy if only 1 jiffy is left in the slice.
3468         */
3469        if (cfqq->slice_end - jiffies == 1)
3470                return true;
3471
3472        return false;
3473}
3474
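/*
 * Worked example for the think time check above (hypothetical numbers):
 * with 3 jiffies of slice left and ttime_mean = 5, the next request is
 * expected only after the slice would have expired, so we return true
 * and the completion path marks the queue wait-busy instead of letting
 * the slice lapse while idle.
 */
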
3475static void cfq_completed_request(struct request_queue *q, struct request *rq)
3476{
3477        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3478        struct cfq_data *cfqd = cfqq->cfqd;
3479        const int sync = rq_is_sync(rq);
3480        unsigned long now;
3481
3482        now = jiffies;
3483        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3484                     !!(rq->cmd_flags & REQ_NOIDLE));
3485
3486        cfq_update_hw_tag(cfqd);
3487
3488        WARN_ON(!cfqd->rq_in_driver);
3489        WARN_ON(!cfqq->dispatched);
3490        cfqd->rq_in_driver--;
3491        cfqq->dispatched--;
3492        (RQ_CFQG(rq))->dispatched--;
3493        cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3494                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3495                        rq_data_dir(rq), rq_is_sync(rq));
3496
3497        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3498
3499        if (sync) {
3500                RQ_CIC(rq)->last_end_request = now;
3501                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3502                        cfqd->last_delayed_sync = now;
3503        }
3504
3505        /*
3506         * If this is the active queue, check if it needs to be expired,
3507         * or if we want to idle in case it has no pending requests.
3508         */
3509        if (cfqd->active_queue == cfqq) {
3510                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3511
3512                if (cfq_cfqq_slice_new(cfqq)) {
3513                        cfq_set_prio_slice(cfqd, cfqq);
3514                        cfq_clear_cfqq_slice_new(cfqq);
3515                }
3516
3517                /*
3518                 * Should we wait for the next request to come in before we
3519                 * expire the queue?
3520                 */
3521                if (cfq_should_wait_busy(cfqd, cfqq)) {
3522                        unsigned long extend_sl = cfqd->cfq_slice_idle;
3523                        if (!cfqd->cfq_slice_idle)
3524                                extend_sl = cfqd->cfq_group_idle;
3525                        cfqq->slice_end = jiffies + extend_sl;
3526                        cfq_mark_cfqq_wait_busy(cfqq);
3527                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3528                }
3529
3530                /*
3531                 * Idling is not enabled on:
3532                 * - expired queues
3533                 * - idle-priority queues
3534                 * - async queues
3535                 * - queues with still some requests queued
3536                 * - when there is a close cooperator
3537                 */
3538                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3539                        cfq_slice_expired(cfqd, 1);
3540                else if (sync && cfqq_empty &&
3541                         !cfq_close_cooperator(cfqd, cfqq)) {
3542                        cfq_arm_slice_timer(cfqd);
3543                }
3544        }
3545
3546        if (!cfqd->rq_in_driver)
3547                cfq_schedule_dispatch(cfqd);
3548}
3549
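/*
 * Note on last_delayed_sync above: a sync request counts as delayed when
 * it completes at or past its fifo deadline, i.e. when
 * rq->start_time + cfq_fifo_expire[1] is not after `now'. E.g. with a
 * sync fifo expiry of 125 jiffies (hypothetical value), a request
 * started at jiffies 10000 that completes at 10200 sets
 * last_delayed_sync = 10200.
 */
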
3550/*
3551 * we temporarily boost lower priority queues if they are holding fs exclusive
3552 * resources. they are boosted to normal prio (CLASS_BE/4)
3553 */
3554static void cfq_prio_boost(struct cfq_queue *cfqq)
3555{
3556        if (has_fs_excl()) {
3557                /*
3558                 * boost idle prio on transactions that would lock out other
3559                 * users of the filesystem
3560                 */
3561                if (cfq_class_idle(cfqq))
3562                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
3563                if (cfqq->ioprio > IOPRIO_NORM)
3564                        cfqq->ioprio = IOPRIO_NORM;
3565        } else {
3566                /*
3567                 * unboost the queue (if needed)
3568                 */
3569                cfqq->ioprio_class = cfqq->org_ioprio_class;
3570                cfqq->ioprio = cfqq->org_ioprio;
3571        }
3572}
3573
3574static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3575{
3576        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3577                cfq_mark_cfqq_must_alloc_slice(cfqq);
3578                return ELV_MQUEUE_MUST;
3579        }
3580
3581        return ELV_MQUEUE_MAY;
3582}
3583
3584static int cfq_may_queue(struct request_queue *q, int rw)
3585{
3586        struct cfq_data *cfqd = q->elevator->elevator_data;
3587        struct task_struct *tsk = current;
3588        struct cfq_io_context *cic;
3589        struct cfq_queue *cfqq;
3590
3591        /*
3592         * don't force setup of a queue from here, as a call to may_queue
3593         * does not necessarily imply that a request actually will be queued.
3594         * so just look up a possibly existing queue, or return 'may queue'
3595         * if that fails
3596         */
3597        cic = cfq_cic_lookup(cfqd, tsk->io_context);
3598        if (!cic)
3599                return ELV_MQUEUE_MAY;
3600
3601        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3602        if (cfqq) {
3603                cfq_init_prio_data(cfqq, cic->ioc);
3604                cfq_prio_boost(cfqq);
3605
3606                return __cfq_may_queue(cfqq);
3607        }
3608
3609        return ELV_MQUEUE_MAY;
3610}
3611
3612/*
3613 * queue lock held here
3614 */
3615static void cfq_put_request(struct request *rq)
3616{
3617        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3618
3619        if (cfqq) {
3620                const int rw = rq_data_dir(rq);
3621
3622                BUG_ON(!cfqq->allocated[rw]);
3623                cfqq->allocated[rw]--;
3624
3625                put_io_context(RQ_CIC(rq)->ioc);
3626
3627                rq->elevator_private[0] = NULL;
3628                rq->elevator_private[1] = NULL;
3629
3630                /* Put down rq reference on cfqg */
3631                cfq_put_cfqg(RQ_CFQG(rq));
3632                rq->elevator_private[2] = NULL;
3633
3634                cfq_put_queue(cfqq);
3635        }
3636}
3637
3638static struct cfq_queue *
3639cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3640                struct cfq_queue *cfqq)
3641{
3642        cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3643        cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3644        cfq_mark_cfqq_coop(cfqq->new_cfqq);
3645        cfq_put_queue(cfqq);
3646        return cic_to_cfqq(cic, 1);
3647}
3648
3649/*
3650 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3651 * was the last process referring to said cfqq.
3652 */
3653static struct cfq_queue *
3654split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3655{
3656        if (cfqq_process_refs(cfqq) == 1) {
3657                cfqq->pid = current->pid;
3658                cfq_clear_cfqq_coop(cfqq);
3659                cfq_clear_cfqq_split_coop(cfqq);
3660                return cfqq;
3661        }
3662
3663        cic_set_cfqq(cic, NULL, 1);
3664
3665        cfq_put_cooperator(cfqq);
3666
3667        cfq_put_queue(cfqq);
3668        return NULL;
3669}
3670/*
3671 * Allocate cfq data structures associated with this request.
3672 */
3673static int
3674cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3675{
3676        struct cfq_data *cfqd = q->elevator->elevator_data;
3677        struct cfq_io_context *cic;
3678        const int rw = rq_data_dir(rq);
3679        const bool is_sync = rq_is_sync(rq);
3680        struct cfq_queue *cfqq;
3681        unsigned long flags;
3682
3683        might_sleep_if(gfp_mask & __GFP_WAIT);
3684
3685        cic = cfq_get_io_context(cfqd, gfp_mask);
3686
3687        spin_lock_irqsave(q->queue_lock, flags);
3688
3689        if (!cic)
3690                goto queue_fail;
3691
3692new_queue:
3693        cfqq = cic_to_cfqq(cic, is_sync);
3694        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3695                cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3696                cic_set_cfqq(cic, cfqq, is_sync);
3697        } else {
3698                /*
3699                 * If the queue was seeky for too long, break it apart.
3700                 */
3701                if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3702                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3703                        cfqq = split_cfqq(cic, cfqq);
3704                        if (!cfqq)
3705                                goto new_queue;
3706                }
3707
3708                /*
3709                 * Check to see if this queue is scheduled to merge with
3710                 * another, closely cooperating queue.  The merging of
3711                 * queues happens here as it must be done in process context.
3712                 * The reference on new_cfqq was taken in merge_cfqqs.
3713                 */
3714                if (cfqq->new_cfqq)
3715                        cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3716        }
3717
3718        cfqq->allocated[rw]++;
3719
3720        cfqq->ref++;
3721        rq->elevator_private[0] = cic;
3722        rq->elevator_private[1] = cfqq;
3723        rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
3724        spin_unlock_irqrestore(q->queue_lock, flags);
3725        return 0;
3726
3727queue_fail:
3728        if (cic)
3729                put_io_context(cic->ioc);
3730
3731        cfq_schedule_dispatch(cfqd);
3732        spin_unlock_irqrestore(q->queue_lock, flags);
3733        cfq_log(cfqd, "set_request fail");
3734        return 1;
3735}
3736
3737static void cfq_kick_queue(struct work_struct *work)
3738{
3739        struct cfq_data *cfqd =
3740                container_of(work, struct cfq_data, unplug_work);
3741        struct request_queue *q = cfqd->queue;
3742
3743        spin_lock_irq(q->queue_lock);
3744        __blk_run_queue(cfqd->queue);
3745        spin_unlock_irq(q->queue_lock);
3746}
3747
3748/*
3749 * Timer running if the active_queue is currently idling inside its time slice
3750 */
3751static void cfq_idle_slice_timer(unsigned long data)
3752{
3753        struct cfq_data *cfqd = (struct cfq_data *) data;
3754        struct cfq_queue *cfqq;
3755        unsigned long flags;
3756        int timed_out = 1;
3757
3758        cfq_log(cfqd, "idle timer fired");
3759
3760        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3761
3762        cfqq = cfqd->active_queue;
3763        if (cfqq) {
3764                timed_out = 0;
3765
3766                /*
3767                 * We saw a request before the queue expired, let it through
3768                 */
3769                if (cfq_cfqq_must_dispatch(cfqq))
3770                        goto out_kick;
3771
3772                /*
3773                 * expired
3774                 */
3775                if (cfq_slice_used(cfqq))
3776                        goto expire;
3777
3778                /*
3779                 * only expire and reinvoke the request handler if there are
3780                 * other queues with pending requests
3781                 */
3782                if (!cfqd->busy_queues)
3783                        goto out_cont;
3784
3785                /*
3786                 * not expired and it has a request pending, let it dispatch
3787                 */
3788                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3789                        goto out_kick;
3790
3791                /*
3792                 * The queue-depth flag is cleared only when idling didn't succeed
3793                 */
3794                cfq_clear_cfqq_deep(cfqq);
3795        }
3796expire:
3797        cfq_slice_expired(cfqd, timed_out);
3798out_kick:
3799        cfq_schedule_dispatch(cfqd);
3800out_cont:
3801        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3802}
3803
3804static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3805{
3806        del_timer_sync(&cfqd->idle_slice_timer);
3807        cancel_work_sync(&cfqd->unplug_work);
3808}
3809
3810static void cfq_put_async_queues(struct cfq_data *cfqd)
3811{
3812        int i;
3813
3814        for (i = 0; i < IOPRIO_BE_NR; i++) {
3815                if (cfqd->async_cfqq[0][i])
3816                        cfq_put_queue(cfqd->async_cfqq[0][i]);
3817                if (cfqd->async_cfqq[1][i])
3818                        cfq_put_queue(cfqd->async_cfqq[1][i]);
3819        }
3820
3821        if (cfqd->async_idle_cfqq)
3822                cfq_put_queue(cfqd->async_idle_cfqq);
3823}
3824
3825static void cfq_cfqd_free(struct rcu_head *head)
3826{
3827        kfree(container_of(head, struct cfq_data, rcu));
3828}
3829
3830static void cfq_exit_queue(struct elevator_queue *e)
3831{
3832        struct cfq_data *cfqd = e->elevator_data;
3833        struct request_queue *q = cfqd->queue;
3834
3835        cfq_shutdown_timer_wq(cfqd);
3836
3837        spin_lock_irq(q->queue_lock);
3838
3839        if (cfqd->active_queue)
3840                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3841
3842        while (!list_empty(&cfqd->cic_list)) {
3843                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3844                                                        struct cfq_io_context,
3845                                                        queue_list);
3846
3847                __cfq_exit_single_io_context(cfqd, cic);
3848        }
3849
3850        cfq_put_async_queues(cfqd);
3851        cfq_release_cfq_groups(cfqd);
3852        cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3853
3854        spin_unlock_irq(q->queue_lock);
3855
3856        cfq_shutdown_timer_wq(cfqd);
3857
3858        spin_lock(&cic_index_lock);
3859        ida_remove(&cic_index_ida, cfqd->cic_index);
3860        spin_unlock(&cic_index_lock);
3861
3862        /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3863        call_rcu(&cfqd->rcu, cfq_cfqd_free);
3864}
3865
3866static int cfq_alloc_cic_index(void)
3867{
3868        int index, error;
3869
3870        do {
3871                if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3872                        return -ENOMEM;
3873
3874                spin_lock(&cic_index_lock);
3875                error = ida_get_new(&cic_index_ida, &index);
3876                spin_unlock(&cic_index_lock);
3877                if (error && error != -EAGAIN)
3878                        return error;
3879        } while (error);
3880
3881        return index;
3882}
3883
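/*
 * ida_get_new() can return -EAGAIN when the layer preallocated by
 * ida_pre_get() was consumed by a concurrent allocation, hence the
 * preload-and-retry loop above. The matching free, as done in
 * cfq_exit_queue(), is:
 *
 *	spin_lock(&cic_index_lock);
 *	ida_remove(&cic_index_ida, index);
 *	spin_unlock(&cic_index_lock);
 */
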
3884static void *cfq_init_queue(struct request_queue *q)
3885{
3886        struct cfq_data *cfqd;
3887        int i, j;
3888        struct cfq_group *cfqg;
3889        struct cfq_rb_root *st;
3890
3891        i = cfq_alloc_cic_index();
3892        if (i < 0)
3893                return NULL;
3894
3895        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3896        if (!cfqd)
3897                return NULL;
3898
3899        /*
3900         * No need to take the queue_lock in this routine, since we are
3901         * initializing the ioscheduler and nobody else is using cfqd yet
3902         */
3903        cfqd->cic_index = i;
3904
3905        /* Init root service tree */
3906        cfqd->grp_service_tree = CFQ_RB_ROOT;
3907
3908        /* Init root group */
3909        cfqg = &cfqd->root_group;
3910        for_each_cfqg_st(cfqg, i, j, st)
3911                *st = CFQ_RB_ROOT;
3912        RB_CLEAR_NODE(&cfqg->rb_node);
3913
3914        /* Give preference to root group over other groups */
3915        cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3916
3917#ifdef CONFIG_CFQ_GROUP_IOSCHED
3918        /*
3919         * Take a reference to the root group which we never drop, just to
3920         * make sure that cfq_put_cfqg() does not try to kfree the root group
3921         */
3922        cfqg->ref = 1;
3923        rcu_read_lock();
3924        cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3925                                        (void *)cfqd, 0);
3926        rcu_read_unlock();
3927#endif
3928        /*
3929         * Not strictly needed (since RB_ROOT just clears the node and we
3930         * zeroed cfqd on alloc), but better be safe in case someone decides
3931         * to add magic to the rb code
3932         */
3933        for (i = 0; i < CFQ_PRIO_LISTS; i++)
3934                cfqd->prio_trees[i] = RB_ROOT;
3935
3936        /*
3937         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3938         * Grab a permanent reference to it, so that the normal code flow
3939         * will not attempt to free it.
3940         */
3941        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3942        cfqd->oom_cfqq.ref++;
3943        cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3944
3945        INIT_LIST_HEAD(&cfqd->cic_list);
3946
3947        cfqd->queue = q;
3948
3949        init_timer(&cfqd->idle_slice_timer);
3950        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3951        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3952
3953        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3954
3955        cfqd->cfq_quantum = cfq_quantum;
3956        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3957        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3958        cfqd->cfq_back_max = cfq_back_max;
3959        cfqd->cfq_back_penalty = cfq_back_penalty;
3960        cfqd->cfq_slice[0] = cfq_slice_async;
3961        cfqd->cfq_slice[1] = cfq_slice_sync;
3962        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3963        cfqd->cfq_slice_idle = cfq_slice_idle;
3964        cfqd->cfq_group_idle = cfq_group_idle;
3965        cfqd->cfq_latency = 1;
3966        cfqd->hw_tag = -1;
3967        /*
3968         * we optimistically start assuming sync ops weren't delayed in the
3969         * last second, in order to allow a larger depth for async operations.
3970         */
3971        cfqd->last_delayed_sync = jiffies - HZ;
3972        return cfqd;
3973}
3974
3975static void cfq_slab_kill(void)
3976{
3977        /*
3978         * Caller already ensured that pending RCU callbacks are completed,
3979         * so we should have no busy allocations at this point.
3980         */
3981        if (cfq_pool)
3982                kmem_cache_destroy(cfq_pool);
3983        if (cfq_ioc_pool)
3984                kmem_cache_destroy(cfq_ioc_pool);
3985}
3986
3987static int __init cfq_slab_setup(void)
3988{
3989        cfq_pool = KMEM_CACHE(cfq_queue, 0);
3990        if (!cfq_pool)
3991                goto fail;
3992
3993        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3994        if (!cfq_ioc_pool)
3995                goto fail;
3996
3997        return 0;
3998fail:
3999        cfq_slab_kill();
4000        return -ENOMEM;
4001}
4002
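/*
 * KMEM_CACHE(st, flags), used above, is shorthand for creating a slab
 * cache keyed off the struct itself, roughly:
 *
 *	kmem_cache_create(#st, sizeof(struct st),
 *			  __alignof__(struct st), flags, NULL)
 */
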
4003/*
4004 * sysfs parts below -->
4005 */
4006static ssize_t
4007cfq_var_show(unsigned int var, char *page)
4008{
4009        return sprintf(page, "%d\n", var);
4010}
4011
4012static ssize_t
4013cfq_var_store(unsigned int *var, const char *page, size_t count)
4014{
4015        char *p = (char *) page;
4016
4017        *var = simple_strtoul(p, &p, 10);
4018        return count;
4019}
4020
4021#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4022static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4023{                                                                       \
4024        struct cfq_data *cfqd = e->elevator_data;                       \
4025        unsigned int __data = __VAR;                                    \
4026        if (__CONV)                                                     \
4027                __data = jiffies_to_msecs(__data);                      \
4028        return cfq_var_show(__data, (page));                            \
4029}
4030SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4031SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4032SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4033SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4034SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4035SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4036SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4037SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4038SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4039SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4040SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4041#undef SHOW_FUNCTION
4042
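/*
 * For reference, SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
 * above expands to (roughly):
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		__data = jiffies_to_msecs(__data);	(since __CONV == 1)
 *		return cfq_var_show(__data, page);
 *	}
 */
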
4043#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4044static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4045{                                                                       \
4046        struct cfq_data *cfqd = e->elevator_data;                       \
4047        unsigned int __data;                                            \
4048        int ret = cfq_var_store(&__data, (page), count);                \
4049        if (__data < (MIN))                                             \
4050                __data = (MIN);                                         \
4051        else if (__data > (MAX))                                        \
4052                __data = (MAX);                                         \
4053        if (__CONV)                                                     \
4054                *(__PTR) = msecs_to_jiffies(__data);                    \
4055        else                                                            \
4056                *(__PTR) = __data;                                      \
4057        return ret;                                                     \
4058}
4059STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4060STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4061                UINT_MAX, 1);
4062STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4063                UINT_MAX, 1);
4064STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4065STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4066                UINT_MAX, 0);
4067STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4068STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4069STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4070STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4071STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4072                UINT_MAX, 0);
4073STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4074#undef STORE_FUNCTION
4075
4076#define CFQ_ATTR(name) \
4077        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4078
4079static struct elv_fs_entry cfq_attrs[] = {
4080        CFQ_ATTR(quantum),
4081        CFQ_ATTR(fifo_expire_sync),
4082        CFQ_ATTR(fifo_expire_async),
4083        CFQ_ATTR(back_seek_max),
4084        CFQ_ATTR(back_seek_penalty),
4085        CFQ_ATTR(slice_sync),
4086        CFQ_ATTR(slice_async),
4087        CFQ_ATTR(slice_async_rq),
4088        CFQ_ATTR(slice_idle),
4089        CFQ_ATTR(group_idle),
4090        CFQ_ATTR(low_latency),
4091        __ATTR_NULL
4092};
4093
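/*
 * With cfq selected as the elevator, these attributes appear under
 * /sys/block/<dev>/queue/iosched/. Illustrative shell session (device
 * name is hypothetical):
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/iosched/slice_idle	(reported in msecs)
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */
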
4094static struct elevator_type iosched_cfq = {
4095        .ops = {
4096                .elevator_merge_fn =            cfq_merge,
4097                .elevator_merged_fn =           cfq_merged_request,
4098                .elevator_merge_req_fn =        cfq_merged_requests,
4099                .elevator_allow_merge_fn =      cfq_allow_merge,
4100                .elevator_bio_merged_fn =       cfq_bio_merged,
4101                .elevator_dispatch_fn =         cfq_dispatch_requests,
4102                .elevator_add_req_fn =          cfq_insert_request,
4103                .elevator_activate_req_fn =     cfq_activate_request,
4104                .elevator_deactivate_req_fn =   cfq_deactivate_request,
4105                .elevator_completed_req_fn =    cfq_completed_request,
4106                .elevator_former_req_fn =       elv_rb_former_request,
4107                .elevator_latter_req_fn =       elv_rb_latter_request,
4108                .elevator_set_req_fn =          cfq_set_request,
4109                .elevator_put_req_fn =          cfq_put_request,
4110                .elevator_may_queue_fn =        cfq_may_queue,
4111                .elevator_init_fn =             cfq_init_queue,
4112                .elevator_exit_fn =             cfq_exit_queue,
4113                .trim =                         cfq_free_io_context,
4114        },
4115        .elevator_attrs =       cfq_attrs,
4116        .elevator_name =        "cfq",
4117        .elevator_owner =       THIS_MODULE,
4118};
4119
4120#ifdef CONFIG_CFQ_GROUP_IOSCHED
4121static struct blkio_policy_type blkio_policy_cfq = {
4122        .ops = {
4123                .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
4124                .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4125        },
4126        .plid = BLKIO_POLICY_PROP,
4127};
4128#else
4129static struct blkio_policy_type blkio_policy_cfq;
4130#endif
4131
4132static int __init cfq_init(void)
4133{
4134        /*
4135         * could be 0 on HZ < 1000 setups
4136         */
4137        if (!cfq_slice_async)
4138                cfq_slice_async = 1;
4139        if (!cfq_slice_idle)
4140                cfq_slice_idle = 1;
4141
4142#ifdef CONFIG_CFQ_GROUP_IOSCHED
4143        if (!cfq_group_idle)
4144                cfq_group_idle = 1;
4145#else
4146        cfq_group_idle = 0;
4147#endif
4148        if (cfq_slab_setup())
4149                return -ENOMEM;
4150
4151        elv_register(&iosched_cfq);
4152        blkio_policy_register(&blkio_policy_cfq);
4153
4154        return 0;
4155}
4156
4157static void __exit cfq_exit(void)
4158{
4159        DECLARE_COMPLETION_ONSTACK(all_gone);
4160        blkio_policy_unregister(&blkio_policy_cfq);
4161        elv_unregister(&iosched_cfq);
4162        ioc_gone = &all_gone;
4163        /* ioc_gone's update must be visible before reading ioc_count */
4164        smp_wmb();
4165
4166        /*
4167         * this also protects us from entering cfq_slab_kill() with
4168         * pending RCU callbacks
4169         */
4170        if (elv_ioc_count_read(cfq_ioc_count))
4171                wait_for_completion(&all_gone);
4172        ida_destroy(&cic_index_ida);
4173        cfq_slab_kill();
4174}
4175
4176module_init(cfq_init);
4177module_exit(cfq_exit);
4178
4179MODULE_AUTHOR("Jens Axboe");
4180MODULE_LICENSE("GPL");
4181MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
4182