linux/block/cfq-iosched.c
   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/blkdev.h>
  12#include <linux/elevator.h>
  13#include <linux/jiffies.h>
  14#include <linux/rbtree.h>
  15#include <linux/ioprio.h>
  16#include <linux/blktrace_api.h>
  17#include "blk.h"
  18#include "blk-cgroup.h"
  19
  20/*
  21 * tunables
  22 */
   23/* max requests dispatched in one round of service */
  24static const int cfq_quantum = 8;
  25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  26/* maximum backwards seek, in KiB */
  27static const int cfq_back_max = 16 * 1024;
  28/* penalty of a backwards seek */
  29static const int cfq_back_penalty = 2;
  30static const int cfq_slice_sync = HZ / 10;
  31static int cfq_slice_async = HZ / 25;
  32static const int cfq_slice_async_rq = 2;
  33static int cfq_slice_idle = HZ / 125;
  34static int cfq_group_idle = HZ / 125;
  35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  36static const int cfq_hist_divisor = 4;
  37
  38/*
  39 * offset from end of service tree
  40 */
  41#define CFQ_IDLE_DELAY          (HZ / 5)
  42
  43/*
  44 * below this threshold, we consider thinktime immediate
  45 */
  46#define CFQ_MIN_TT              (2)
  47
  48#define CFQ_SLICE_SCALE         (5)
  49#define CFQ_HW_QUEUE_MIN        (5)
  50#define CFQ_SERVICE_SHIFT       12
  51
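/*
 * Seekiness heuristics, in 512-byte sectors.  seek_history is a 32-bit
 * sliding window with one bit per recent request; CFQQ_SEEKY() treats the
 * queue as seeky once more than 32/8 (i.e. 4) of the last 32 requests
 * were, roughly, further than CFQQ_SEEK_THR away from the previous one.
 */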
  52#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
  53#define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
  54#define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
  55#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
  56
  57#define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
  58#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
  59#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
  60
  61static struct kmem_cache *cfq_pool;
  62
  63#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
  64#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  65#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  66
  67#define sample_valid(samples)   ((samples) > 80)
  68#define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
  69
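/*
 * Think time bookkeeping: last_end_request is when the last request from
 * this context completed, and ttime_total/ttime_samples/ttime_mean track
 * (in jiffies) how long the submitter typically waits before issuing the
 * next request.  This feeds the idling decisions further down.
 */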
  70struct cfq_ttime {
  71        unsigned long last_end_request;
  72
  73        unsigned long ttime_total;
  74        unsigned long ttime_samples;
  75        unsigned long ttime_mean;
  76};
  77
  78/*
  79 * Most of our rbtree usage is for sorting with min extraction, so
  80 * if we cache the leftmost node we don't have to walk down the tree
   81 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  82 * move this into the elevator for the rq sorting as well.
  83 */
  84struct cfq_rb_root {
  85        struct rb_root rb;
  86        struct rb_node *left;
  87        unsigned count;
  88        u64 min_vdisktime;
  89        struct cfq_ttime ttime;
  90};
  91#define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
  92                        .ttime = {.last_end_request = jiffies,},}
  93
  94/*
  95 * Per process-grouping structure
  96 */
  97struct cfq_queue {
  98        /* reference count */
  99        int ref;
 100        /* various state flags, see below */
 101        unsigned int flags;
 102        /* parent cfq_data */
 103        struct cfq_data *cfqd;
 104        /* service_tree member */
 105        struct rb_node rb_node;
 106        /* service_tree key */
 107        unsigned long rb_key;
 108        /* prio tree member */
 109        struct rb_node p_node;
 110        /* prio tree root we belong to, if any */
 111        struct rb_root *p_root;
 112        /* sorted list of pending requests */
 113        struct rb_root sort_list;
 114        /* if fifo isn't expired, next request to serve */
 115        struct request *next_rq;
 116        /* requests queued in sort_list */
 117        int queued[2];
 118        /* currently allocated requests */
 119        int allocated[2];
 120        /* fifo list of requests in sort_list */
 121        struct list_head fifo;
 122
 123        /* time when queue got scheduled in to dispatch first request. */
 124        unsigned long dispatch_start;
 125        unsigned int allocated_slice;
 126        unsigned int slice_dispatch;
 127        /* time when first request from queue completed and slice started. */
 128        unsigned long slice_start;
 129        unsigned long slice_end;
 130        long slice_resid;
 131
 132        /* pending priority requests */
 133        int prio_pending;
 134        /* number of requests that are on the dispatch list or inside driver */
 135        int dispatched;
 136
  137        /* io prio of this queue */
 138        unsigned short ioprio, org_ioprio;
 139        unsigned short ioprio_class;
 140
 141        pid_t pid;
 142
 143        u32 seek_history;
 144        sector_t last_request_pos;
 145
 146        struct cfq_rb_root *service_tree;
 147        struct cfq_queue *new_cfqq;
 148        struct cfq_group *cfqg;
 149        /* Number of sectors dispatched from queue in single dispatch round */
 150        unsigned long nr_sectors;
 151};
 152
 153/*
 154 * First index in the service_trees.
 155 * IDLE is handled separately, so it has negative index
 156 */
 157enum wl_class_t {
 158        BE_WORKLOAD = 0,
 159        RT_WORKLOAD = 1,
 160        IDLE_WORKLOAD = 2,
 161        CFQ_PRIO_NR,
 162};
 163
 164/*
 165 * Second index in the service_trees.
 166 */
 167enum wl_type_t {
 168        ASYNC_WORKLOAD = 0,
 169        SYNC_NOIDLE_WORKLOAD = 1,
 170        SYNC_WORKLOAD = 2
 171};
 172
 173struct cfqg_stats {
 174#ifdef CONFIG_CFQ_GROUP_IOSCHED
 175        /* total bytes transferred */
 176        struct blkg_rwstat              service_bytes;
 177        /* total IOs serviced, post merge */
 178        struct blkg_rwstat              serviced;
 179        /* number of ios merged */
 180        struct blkg_rwstat              merged;
 181        /* total time spent on device in ns, may not be accurate w/ queueing */
 182        struct blkg_rwstat              service_time;
 183        /* total time spent waiting in scheduler queue in ns */
 184        struct blkg_rwstat              wait_time;
 185        /* number of IOs queued up */
 186        struct blkg_rwstat              queued;
 187        /* total sectors transferred */
 188        struct blkg_stat                sectors;
 189        /* total disk time and nr sectors dispatched by this group */
 190        struct blkg_stat                time;
 191#ifdef CONFIG_DEBUG_BLK_CGROUP
 192        /* time not charged to this cgroup */
 193        struct blkg_stat                unaccounted_time;
 194        /* sum of number of ios queued across all samples */
 195        struct blkg_stat                avg_queue_size_sum;
 196        /* count of samples taken for average */
 197        struct blkg_stat                avg_queue_size_samples;
 198        /* how many times this group has been removed from service tree */
 199        struct blkg_stat                dequeue;
 200        /* total time spent waiting for it to be assigned a timeslice. */
 201        struct blkg_stat                group_wait_time;
 202        /* time spent idling for this blkcg_gq */
 203        struct blkg_stat                idle_time;
 204        /* total time with empty current active q with other requests queued */
 205        struct blkg_stat                empty_time;
 206        /* fields after this shouldn't be cleared on stat reset */
 207        uint64_t                        start_group_wait_time;
 208        uint64_t                        start_idle_time;
 209        uint64_t                        start_empty_time;
 210        uint16_t                        flags;
 211#endif  /* CONFIG_DEBUG_BLK_CGROUP */
 212#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 213};
 214
 215/* This is per cgroup per device grouping structure */
 216struct cfq_group {
 217        /* must be the first member */
 218        struct blkg_policy_data pd;
 219
 220        /* group service_tree member */
 221        struct rb_node rb_node;
 222
 223        /* group service_tree key */
 224        u64 vdisktime;
 225
 226        /*
 227         * The number of active cfqgs and sum of their weights under this
 228         * cfqg.  This covers this cfqg's leaf_weight and all children's
 229         * weights, but does not cover weights of further descendants.
 230         *
 231         * If a cfqg is on the service tree, it's active.  An active cfqg
 232         * also activates its parent and contributes to the children_weight
 233         * of the parent.
 234         */
 235        int nr_active;
 236        unsigned int children_weight;
 237
 238        /*
 239         * vfraction is the fraction of vdisktime that the tasks in this
 240         * cfqg are entitled to.  This is determined by compounding the
 241         * ratios walking up from this cfqg to the root.
 242         *
 243         * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
 244         * vfractions on a service tree is approximately 1.  The sum may
 245         * deviate a bit due to rounding errors and fluctuations caused by
 246         * cfqgs entering and leaving the service tree.
 247         */
 248        unsigned int vfraction;
 249
 250        /*
 251         * There are two weights - (internal) weight is the weight of this
  252         * cfqg against the sibling cfqgs.  leaf_weight is the weight of
 253         * this cfqg against the child cfqgs.  For the root cfqg, both
 254         * weights are kept in sync for backward compatibility.
 255         */
 256        unsigned int weight;
 257        unsigned int new_weight;
 258        unsigned int dev_weight;
 259
 260        unsigned int leaf_weight;
 261        unsigned int new_leaf_weight;
 262        unsigned int dev_leaf_weight;
 263
 264        /* number of cfqq currently on this group */
 265        int nr_cfqq;
 266
 267        /*
 268         * Per group busy queues average. Useful for workload slice calc. We
 269         * create the array for each prio class but at run time it is used
 270         * only for RT and BE class and slot for IDLE class remains unused.
 271         * This is primarily done to avoid confusion and a gcc warning.
 272         */
 273        unsigned int busy_queues_avg[CFQ_PRIO_NR];
 274        /*
 275         * rr lists of queues with requests. We maintain service trees for
 276         * RT and BE classes. These trees are subdivided in subclasses
 277         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 278         * class there is no subclassification and all the cfq queues go on
 279         * a single tree service_tree_idle.
 280         * Counts are embedded in the cfq_rb_root
 281         */
 282        struct cfq_rb_root service_trees[2][3];
 283        struct cfq_rb_root service_tree_idle;
 284
 285        unsigned long saved_wl_slice;
 286        enum wl_type_t saved_wl_type;
 287        enum wl_class_t saved_wl_class;
 288
 289        /* number of requests that are on the dispatch list or inside driver */
 290        int dispatched;
 291        struct cfq_ttime ttime;
 292        struct cfqg_stats stats;        /* stats for this cfqg */
 293        struct cfqg_stats dead_stats;   /* stats pushed from dead children */
 294};
 295
 296struct cfq_io_cq {
 297        struct io_cq            icq;            /* must be the first member */
 298        struct cfq_queue        *cfqq[2];
 299        struct cfq_ttime        ttime;
 300        int                     ioprio;         /* the current ioprio */
 301#ifdef CONFIG_CFQ_GROUP_IOSCHED
 302        uint64_t                blkcg_id;       /* the current blkcg ID */
 303#endif
 304};
 305
 306/*
 307 * Per block device queue structure
 308 */
 309struct cfq_data {
 310        struct request_queue *queue;
 311        /* Root service tree for cfq_groups */
 312        struct cfq_rb_root grp_service_tree;
 313        struct cfq_group *root_group;
 314
 315        /*
 316         * The priority currently being served
 317         */
 318        enum wl_class_t serving_wl_class;
 319        enum wl_type_t serving_wl_type;
 320        unsigned long workload_expires;
 321        struct cfq_group *serving_group;
 322
 323        /*
 324         * Each priority tree is sorted by next_request position.  These
 325         * trees are used when determining if two or more queues are
 326         * interleaving requests (see cfq_close_cooperator).
 327         */
 328        struct rb_root prio_trees[CFQ_PRIO_LISTS];
 329
 330        unsigned int busy_queues;
 331        unsigned int busy_sync_queues;
 332
 333        int rq_in_driver;
 334        int rq_in_flight[2];
 335
 336        /*
 337         * queue-depth detection
 338         */
 339        int rq_queued;
 340        int hw_tag;
 341        /*
 342         * hw_tag can be
  343         * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
 344         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 345         *  0 => no NCQ
 346         */
 347        int hw_tag_est_depth;
 348        unsigned int hw_tag_samples;
 349
 350        /*
 351         * idle window management
 352         */
 353        struct timer_list idle_slice_timer;
 354        struct work_struct unplug_work;
 355
 356        struct cfq_queue *active_queue;
 357        struct cfq_io_cq *active_cic;
 358
 359        /*
 360         * async queue for each priority case
 361         */
 362        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 363        struct cfq_queue *async_idle_cfqq;
 364
 365        sector_t last_position;
 366
 367        /*
 368         * tunables, see top of file
 369         */
 370        unsigned int cfq_quantum;
 371        unsigned int cfq_fifo_expire[2];
 372        unsigned int cfq_back_penalty;
 373        unsigned int cfq_back_max;
 374        unsigned int cfq_slice[2];
 375        unsigned int cfq_slice_async_rq;
 376        unsigned int cfq_slice_idle;
 377        unsigned int cfq_group_idle;
 378        unsigned int cfq_latency;
 379        unsigned int cfq_target_latency;
 380
 381        /*
 382         * Fallback dummy cfqq for extreme OOM conditions
 383         */
 384        struct cfq_queue oom_cfqq;
 385
 386        unsigned long last_delayed_sync;
 387};
 388
 389static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 390
 391static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 392                                            enum wl_class_t class,
 393                                            enum wl_type_t type)
 394{
 395        if (!cfqg)
 396                return NULL;
 397
 398        if (class == IDLE_WORKLOAD)
 399                return &cfqg->service_tree_idle;
 400
 401        return &cfqg->service_trees[class][type];
 402}
 403
 404enum cfqq_state_flags {
 405        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
 406        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
 407        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
 408        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
 409        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
 410        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
 411        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
 412        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
 413        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 414        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
  415        CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
 416        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
 417        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
 418};
 419
 420#define CFQ_CFQQ_FNS(name)                                              \
 421static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
 422{                                                                       \
 423        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
 424}                                                                       \
 425static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
 426{                                                                       \
 427        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
 428}                                                                       \
 429static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
 430{                                                                       \
 431        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
 432}
 433
 434CFQ_CFQQ_FNS(on_rr);
 435CFQ_CFQQ_FNS(wait_request);
 436CFQ_CFQQ_FNS(must_dispatch);
 437CFQ_CFQQ_FNS(must_alloc_slice);
 438CFQ_CFQQ_FNS(fifo_expire);
 439CFQ_CFQQ_FNS(idle_window);
 440CFQ_CFQQ_FNS(prio_changed);
 441CFQ_CFQQ_FNS(slice_new);
 442CFQ_CFQQ_FNS(sync);
 443CFQ_CFQQ_FNS(coop);
 444CFQ_CFQQ_FNS(split_coop);
 445CFQ_CFQQ_FNS(deep);
 446CFQ_CFQQ_FNS(wait_busy);
 447#undef CFQ_CFQQ_FNS
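/*
 * Each CFQ_CFQQ_FNS(name) line above expands into three tiny helpers that
 * manipulate the corresponding bit in cfqq->flags, e.g. for "on_rr":
 *
 *	cfq_mark_cfqq_on_rr(cfqq);		set the flag
 *	cfq_clear_cfqq_on_rr(cfqq);		clear the flag
 *	if (cfq_cfqq_on_rr(cfqq)) ...		test the flag
 */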
 448
 449static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 450{
 451        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 452}
 453
 454static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 455{
 456        return pd_to_blkg(&cfqg->pd);
 457}
 458
 459#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 460
 461/* cfqg stats flags */
 462enum cfqg_stats_flags {
 463        CFQG_stats_waiting = 0,
 464        CFQG_stats_idling,
 465        CFQG_stats_empty,
 466};
 467
 468#define CFQG_FLAG_FNS(name)                                             \
 469static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
 470{                                                                       \
 471        stats->flags |= (1 << CFQG_stats_##name);                       \
 472}                                                                       \
 473static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
 474{                                                                       \
 475        stats->flags &= ~(1 << CFQG_stats_##name);                      \
 476}                                                                       \
 477static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
 478{                                                                       \
 479        return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
 480}                                                                       \
 481
 482CFQG_FLAG_FNS(waiting)
 483CFQG_FLAG_FNS(idling)
 484CFQG_FLAG_FNS(empty)
 485#undef CFQG_FLAG_FNS
 486
 487/* This should be called with the queue_lock held. */
 488static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 489{
 490        unsigned long long now;
 491
 492        if (!cfqg_stats_waiting(stats))
 493                return;
 494
 495        now = sched_clock();
 496        if (time_after64(now, stats->start_group_wait_time))
 497                blkg_stat_add(&stats->group_wait_time,
 498                              now - stats->start_group_wait_time);
 499        cfqg_stats_clear_waiting(stats);
 500}
 501
 502/* This should be called with the queue_lock held. */
 503static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 504                                                 struct cfq_group *curr_cfqg)
 505{
 506        struct cfqg_stats *stats = &cfqg->stats;
 507
 508        if (cfqg_stats_waiting(stats))
 509                return;
 510        if (cfqg == curr_cfqg)
 511                return;
 512        stats->start_group_wait_time = sched_clock();
 513        cfqg_stats_mark_waiting(stats);
 514}
 515
 516/* This should be called with the queue_lock held. */
 517static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 518{
 519        unsigned long long now;
 520
 521        if (!cfqg_stats_empty(stats))
 522                return;
 523
 524        now = sched_clock();
 525        if (time_after64(now, stats->start_empty_time))
 526                blkg_stat_add(&stats->empty_time,
 527                              now - stats->start_empty_time);
 528        cfqg_stats_clear_empty(stats);
 529}
 530
 531static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 532{
 533        blkg_stat_add(&cfqg->stats.dequeue, 1);
 534}
 535
 536static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 537{
 538        struct cfqg_stats *stats = &cfqg->stats;
 539
 540        if (blkg_rwstat_total(&stats->queued))
 541                return;
 542
 543        /*
 544         * group is already marked empty. This can happen if cfqq got new
 545         * request in parent group and moved to this group while being added
 546         * to service tree. Just ignore the event and move on.
 547         */
 548        if (cfqg_stats_empty(stats))
 549                return;
 550
 551        stats->start_empty_time = sched_clock();
 552        cfqg_stats_mark_empty(stats);
 553}
 554
 555static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 556{
 557        struct cfqg_stats *stats = &cfqg->stats;
 558
 559        if (cfqg_stats_idling(stats)) {
 560                unsigned long long now = sched_clock();
 561
 562                if (time_after64(now, stats->start_idle_time))
 563                        blkg_stat_add(&stats->idle_time,
 564                                      now - stats->start_idle_time);
 565                cfqg_stats_clear_idling(stats);
 566        }
 567}
 568
 569static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 570{
 571        struct cfqg_stats *stats = &cfqg->stats;
 572
 573        BUG_ON(cfqg_stats_idling(stats));
 574
 575        stats->start_idle_time = sched_clock();
 576        cfqg_stats_mark_idling(stats);
 577}
 578
 579static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 580{
 581        struct cfqg_stats *stats = &cfqg->stats;
 582
 583        blkg_stat_add(&stats->avg_queue_size_sum,
 584                      blkg_rwstat_total(&stats->queued));
 585        blkg_stat_add(&stats->avg_queue_size_samples, 1);
 586        cfqg_stats_update_group_wait_time(stats);
 587}
 588
 589#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 590
 591static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 592static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 593static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 594static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 595static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 596static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 597static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 598
 599#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 600
 601#ifdef CONFIG_CFQ_GROUP_IOSCHED
 602
 603static struct blkcg_policy blkcg_policy_cfq;
 604
 605static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 606{
 607        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 608}
 609
 610static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
 611{
 612        struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
 613
 614        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 615}
 616
 617static inline void cfqg_get(struct cfq_group *cfqg)
 618{
 619        return blkg_get(cfqg_to_blkg(cfqg));
 620}
 621
 622static inline void cfqg_put(struct cfq_group *cfqg)
 623{
 624        return blkg_put(cfqg_to_blkg(cfqg));
 625}
 626
 627#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
 628        char __pbuf[128];                                               \
 629                                                                        \
 630        blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
 631        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
 632                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
 633                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 634                          __pbuf, ##args);                              \
 635} while (0)
 636
 637#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
 638        char __pbuf[128];                                               \
 639                                                                        \
 640        blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
 641        blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
 642} while (0)
 643
 644static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 645                                            struct cfq_group *curr_cfqg, int rw)
 646{
 647        blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 648        cfqg_stats_end_empty_time(&cfqg->stats);
 649        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 650}
 651
 652static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 653                        unsigned long time, unsigned long unaccounted_time)
 654{
 655        blkg_stat_add(&cfqg->stats.time, time);
 656#ifdef CONFIG_DEBUG_BLK_CGROUP
 657        blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 658#endif
 659}
 660
 661static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 662{
 663        blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 664}
 665
 666static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 667{
 668        blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 669}
 670
 671static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 672                                              uint64_t bytes, int rw)
 673{
 674        blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
 675        blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
 676        blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 677}
 678
 679static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 680                        uint64_t start_time, uint64_t io_start_time, int rw)
 681{
 682        struct cfqg_stats *stats = &cfqg->stats;
 683        unsigned long long now = sched_clock();
 684
 685        if (time_after64(now, io_start_time))
 686                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 687        if (time_after64(io_start_time, start_time))
 688                blkg_rwstat_add(&stats->wait_time, rw,
 689                                io_start_time - start_time);
 690}
 691
 692/* @stats = 0 */
 693static void cfqg_stats_reset(struct cfqg_stats *stats)
 694{
 695        /* queued stats shouldn't be cleared */
 696        blkg_rwstat_reset(&stats->service_bytes);
 697        blkg_rwstat_reset(&stats->serviced);
 698        blkg_rwstat_reset(&stats->merged);
 699        blkg_rwstat_reset(&stats->service_time);
 700        blkg_rwstat_reset(&stats->wait_time);
 701        blkg_stat_reset(&stats->time);
 702#ifdef CONFIG_DEBUG_BLK_CGROUP
 703        blkg_stat_reset(&stats->unaccounted_time);
 704        blkg_stat_reset(&stats->avg_queue_size_sum);
 705        blkg_stat_reset(&stats->avg_queue_size_samples);
 706        blkg_stat_reset(&stats->dequeue);
 707        blkg_stat_reset(&stats->group_wait_time);
 708        blkg_stat_reset(&stats->idle_time);
 709        blkg_stat_reset(&stats->empty_time);
 710#endif
 711}
 712
 713/* @to += @from */
 714static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
 715{
 716        /* queued stats shouldn't be cleared */
 717        blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
 718        blkg_rwstat_merge(&to->serviced, &from->serviced);
 719        blkg_rwstat_merge(&to->merged, &from->merged);
 720        blkg_rwstat_merge(&to->service_time, &from->service_time);
 721        blkg_rwstat_merge(&to->wait_time, &from->wait_time);
  722        blkg_stat_merge(&to->time, &from->time);
 723#ifdef CONFIG_DEBUG_BLK_CGROUP
 724        blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
 725        blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 726        blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
 727        blkg_stat_merge(&to->dequeue, &from->dequeue);
 728        blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
 729        blkg_stat_merge(&to->idle_time, &from->idle_time);
 730        blkg_stat_merge(&to->empty_time, &from->empty_time);
 731#endif
 732}
 733
 734/*
 735 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 736 * recursive stats can still account for the amount used by this cfqg after
 737 * it's gone.
 738 */
 739static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 740{
 741        struct cfq_group *parent = cfqg_parent(cfqg);
 742
 743        lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
 744
 745        if (unlikely(!parent))
 746                return;
 747
 748        cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
 749        cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
 750        cfqg_stats_reset(&cfqg->stats);
 751        cfqg_stats_reset(&cfqg->dead_stats);
 752}
 753
 754#else   /* CONFIG_CFQ_GROUP_IOSCHED */
 755
 756static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 757static inline void cfqg_get(struct cfq_group *cfqg) { }
 758static inline void cfqg_put(struct cfq_group *cfqg) { }
 759
 760#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
 761        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
 762                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
 763                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 764                                ##args)
 765#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
 766
 767static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 768                        struct cfq_group *curr_cfqg, int rw) { }
 769static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 770                        unsigned long time, unsigned long unaccounted_time) { }
 771static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 772static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 773static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 774                                              uint64_t bytes, int rw) { }
 775static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 776                        uint64_t start_time, uint64_t io_start_time, int rw) { }
 777
 778#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 779
 780#define cfq_log(cfqd, fmt, args...)     \
 781        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 782
 783/* Traverses through cfq group service trees */
 784#define for_each_cfqg_st(cfqg, i, j, st) \
 785        for (i = 0; i <= IDLE_WORKLOAD; i++) \
 786                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 787                        : &cfqg->service_tree_idle; \
 788                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 789                        (i == IDLE_WORKLOAD && j == 0); \
 790                        j++, st = i < IDLE_WORKLOAD ? \
 791                        &cfqg->service_trees[i][j]: NULL) \
 792
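/*
 * for_each_cfqg_st() visits all seven service trees of a group: the 2x3
 * RT/BE x ASYNC/SYNC_NOIDLE/SYNC trees while i < IDLE_WORKLOAD, then
 * service_tree_idle exactly once when i == IDLE_WORKLOAD.
 */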
 793static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 794        struct cfq_ttime *ttime, bool group_idle)
 795{
 796        unsigned long slice;
 797        if (!sample_valid(ttime->ttime_samples))
 798                return false;
 799        if (group_idle)
 800                slice = cfqd->cfq_group_idle;
 801        else
 802                slice = cfqd->cfq_slice_idle;
 803        return ttime->ttime_mean > slice;
 804}
 805
 806static inline bool iops_mode(struct cfq_data *cfqd)
 807{
 808        /*
 809         * If we are not idling on queues and it is a NCQ drive, parallel
 810         * execution of requests is on and measuring time is not possible
 811         * in most of the cases until and unless we drive shallower queue
 812         * depths and that becomes a performance bottleneck. In such cases
 813         * switch to start providing fairness in terms of number of IOs.
 814         */
 815        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 816                return true;
 817        else
 818                return false;
 819}
 820
 821static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 822{
 823        if (cfq_class_idle(cfqq))
 824                return IDLE_WORKLOAD;
 825        if (cfq_class_rt(cfqq))
 826                return RT_WORKLOAD;
 827        return BE_WORKLOAD;
 828}
 829
 830
 831static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 832{
 833        if (!cfq_cfqq_sync(cfqq))
 834                return ASYNC_WORKLOAD;
 835        if (!cfq_cfqq_idle_window(cfqq))
 836                return SYNC_NOIDLE_WORKLOAD;
 837        return SYNC_WORKLOAD;
 838}
 839
 840static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 841                                        struct cfq_data *cfqd,
 842                                        struct cfq_group *cfqg)
 843{
 844        if (wl_class == IDLE_WORKLOAD)
 845                return cfqg->service_tree_idle.count;
 846
 847        return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
 848                cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
 849                cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 850}
 851
 852static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 853                                        struct cfq_group *cfqg)
 854{
 855        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
 856                cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 857}
 858
 859static void cfq_dispatch_insert(struct request_queue *, struct request *);
 860static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 861                                       struct cfq_io_cq *cic, struct bio *bio,
 862                                       gfp_t gfp_mask);
 863
 864static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 865{
 866        /* cic->icq is the first member, %NULL will convert to %NULL */
 867        return container_of(icq, struct cfq_io_cq, icq);
 868}
 869
 870static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 871                                               struct io_context *ioc)
 872{
 873        if (ioc)
 874                return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 875        return NULL;
 876}
 877
 878static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 879{
 880        return cic->cfqq[is_sync];
 881}
 882
 883static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 884                                bool is_sync)
 885{
 886        cic->cfqq[is_sync] = cfqq;
 887}
 888
 889static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 890{
 891        return cic->icq.q->elevator->elevator_data;
 892}
 893
 894/*
  895 * We regard a request as SYNC if it's either a read or has the SYNC bit
  896 * set (in which case it could also be a direct WRITE).
 897 */
 898static inline bool cfq_bio_sync(struct bio *bio)
 899{
 900        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 901}
 902
 903/*
 904 * scheduler run of queue, if there are requests pending and no one in the
 905 * driver that will restart queueing
 906 */
 907static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 908{
 909        if (cfqd->busy_queues) {
 910                cfq_log(cfqd, "schedule dispatch");
 911                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 912        }
 913}
 914
 915/*
 916 * Scale schedule slice based on io priority. Use the sync time slice only
 917 * if a queue is marked sync and has sync io queued. A sync queue with async
  918 * io only should not get the full sync slice length.
 919 */
 920static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 921                                 unsigned short prio)
 922{
 923        const int base_slice = cfqd->cfq_slice[sync];
 924
 925        WARN_ON(prio >= IOPRIO_BE_NR);
 926
 927        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 928}
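/*
 * Example: with CFQ_SLICE_SCALE == 5 each priority level is worth 20% of
 * base_slice around the default prio 4, so prio 0 gets 1.8 * base_slice,
 * prio 4 gets base_slice and prio 7 gets 0.4 * base_slice.
 */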
 929
 930static inline int
 931cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 932{
 933        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 934}
 935
 936/**
 937 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 938 * @charge: disk time being charged
 939 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 940 *
 941 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 942 * scaling is inversely proportional.
 943 *
 944 * scaled = charge / vfraction
 945 *
 946 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 947 */
 948static inline u64 cfqg_scale_charge(unsigned long charge,
 949                                    unsigned int vfraction)
 950{
 951        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
 952
 953        /* charge / vfraction */
 954        c <<= CFQ_SERVICE_SHIFT;
 955        do_div(c, vfraction);
 956        return c;
 957}
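/*
 * Example: vfraction uses the same CFQ_SERVICE_SHIFT fixed point, so a
 * group entitled to the whole device (vfraction == 1 << 12) is charged
 * exactly @charge (in fixed point), while a group entitled to half of it
 * (vfraction == 1 << 11) accrues vdisktime twice as fast for the same
 * amount of service.
 */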
 958
 959static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 960{
 961        s64 delta = (s64)(vdisktime - min_vdisktime);
 962        if (delta > 0)
 963                min_vdisktime = vdisktime;
 964
 965        return min_vdisktime;
 966}
 967
 968static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 969{
 970        s64 delta = (s64)(vdisktime - min_vdisktime);
 971        if (delta < 0)
 972                min_vdisktime = vdisktime;
 973
 974        return min_vdisktime;
 975}
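/*
 * Both helpers above compare u64 vdisktimes through a signed delta so the
 * ordering stays correct even if the counters eventually wrap, the same
 * trick time_after() uses for jiffies.
 */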
 976
 977static void update_min_vdisktime(struct cfq_rb_root *st)
 978{
 979        struct cfq_group *cfqg;
 980
 981        if (st->left) {
 982                cfqg = rb_entry_cfqg(st->left);
 983                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 984                                                  cfqg->vdisktime);
 985        }
 986}
 987
 988/*
 989 * get averaged number of queues of RT/BE priority.
  990 * the average is updated with a formula that gives more weight to higher numbers,
  991 * to quickly follow sudden increases and decrease slowly
 992 */
 993
 994static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 995                                        struct cfq_group *cfqg, bool rt)
 996{
 997        unsigned min_q, max_q;
 998        unsigned mult  = cfq_hist_divisor - 1;
 999        unsigned round = cfq_hist_divisor / 2;
1000        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1001
1002        min_q = min(cfqg->busy_queues_avg[rt], busy);
1003        max_q = max(cfqg->busy_queues_avg[rt], busy);
1004        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1005                cfq_hist_divisor;
1006        return cfqg->busy_queues_avg[rt];
1007}
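/*
 * Example with cfq_hist_divisor == 4: the larger of (previous average,
 * current busy count) always gets weight 3, so going from 1 to 5 busy
 * queues pulls the average straight up to (3*5 + 1 + 2) / 4 = 4, while
 * dropping from 5 busy queues down to 1 only lowers it to that same 4.
 */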
1008
1009static inline unsigned
1010cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1011{
1012        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1013}
1014
1015static inline unsigned
1016cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1017{
1018        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1019        if (cfqd->cfq_latency) {
1020                /*
1021                 * interested queues (we consider only the ones with the same
1022                 * priority class in the cfq group)
1023                 */
1024                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1025                                                cfq_class_rt(cfqq));
1026                unsigned sync_slice = cfqd->cfq_slice[1];
1027                unsigned expect_latency = sync_slice * iq;
1028                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1029
1030                if (expect_latency > group_slice) {
1031                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1032                        /* scale low_slice according to IO priority
1033                         * and sync vs async */
1034                        unsigned low_slice =
1035                                min(slice, base_low_slice * slice / sync_slice);
1036                        /* the adapted slice value is scaled to fit all iqs
1037                         * into the target latency */
1038                        slice = max(slice * group_slice / expect_latency,
1039                                    low_slice);
1040                }
1041        }
1042        return slice;
1043}
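/*
 * Example: with the default 100ms sync slice and 5 interested queues the
 * expected latency is 500ms; if this group's share of the 300ms target
 * latency works out to the full 300ms, each queue's slice is scaled by
 * 300/500, but never below the low_slice floor derived from
 * cfq_slice_idle.
 */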
1044
1045static inline void
1046cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1047{
1048        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1049
1050        cfqq->slice_start = jiffies;
1051        cfqq->slice_end = jiffies + slice;
1052        cfqq->allocated_slice = slice;
1053        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1054}
1055
1056/*
1057 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1058 * isn't valid until the first request from the dispatch is activated
1059 * and the slice time set.
1060 */
1061static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1062{
1063        if (cfq_cfqq_slice_new(cfqq))
1064                return false;
1065        if (time_before(jiffies, cfqq->slice_end))
1066                return false;
1067
1068        return true;
1069}
1070
1071/*
 1072 * Lifted from AS - choose which of rq1 and rq2 is best served now.
1073 * We choose the request that is closest to the head right now. Distance
1074 * behind the head is penalized and only allowed to a certain extent.
1075 */
1076static struct request *
1077cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1078{
1079        sector_t s1, s2, d1 = 0, d2 = 0;
1080        unsigned long back_max;
1081#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
1082#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
1083        unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1084
1085        if (rq1 == NULL || rq1 == rq2)
1086                return rq2;
1087        if (rq2 == NULL)
1088                return rq1;
1089
1090        if (rq_is_sync(rq1) != rq_is_sync(rq2))
1091                return rq_is_sync(rq1) ? rq1 : rq2;
1092
1093        if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1094                return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1095
1096        s1 = blk_rq_pos(rq1);
1097        s2 = blk_rq_pos(rq2);
1098
1099        /*
1100         * by definition, 1KiB is 2 sectors
1101         */
1102        back_max = cfqd->cfq_back_max * 2;
1103
1104        /*
1105         * Strict one way elevator _except_ in the case where we allow
1106         * short backward seeks which are biased as twice the cost of a
1107         * similar forward seek.
1108         */
1109        if (s1 >= last)
1110                d1 = s1 - last;
1111        else if (s1 + back_max >= last)
1112                d1 = (last - s1) * cfqd->cfq_back_penalty;
1113        else
1114                wrap |= CFQ_RQ1_WRAP;
1115
1116        if (s2 >= last)
1117                d2 = s2 - last;
1118        else if (s2 + back_max >= last)
1119                d2 = (last - s2) * cfqd->cfq_back_penalty;
1120        else
1121                wrap |= CFQ_RQ2_WRAP;
1122
1123        /* Found required data */
1124
1125        /*
1126         * By doing switch() on the bit mask "wrap" we avoid having to
1127         * check two variables for all permutations: --> faster!
1128         */
1129        switch (wrap) {
1130        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1131                if (d1 < d2)
1132                        return rq1;
1133                else if (d2 < d1)
1134                        return rq2;
1135                else {
1136                        if (s1 >= s2)
1137                                return rq1;
1138                        else
1139                                return rq2;
1140                }
1141
1142        case CFQ_RQ2_WRAP:
1143                return rq1;
1144        case CFQ_RQ1_WRAP:
1145                return rq2;
1146        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1147        default:
1148                /*
1149                 * Since both rqs are wrapped,
1150                 * start with the one that's further behind head
1151                 * (--> only *one* back seek required),
1152                 * since back seek takes more time than forward.
1153                 */
1154                if (s1 <= s2)
1155                        return rq1;
1156                else
1157                        return rq2;
1158        }
1159}
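/*
 * Example: with the default cfq_back_max of 16 MiB (32768 sectors) and
 * cfq_back_penalty of 2, a request sitting 1000 sectors behind the head
 * competes as if it were 2000 sectors ahead, and anything more than
 * 16 MiB behind is flagged as wrapped and only chosen if the competing
 * request is wrapped as well.
 */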
1160
1161/*
 1162 * The below is the leftmost-node cache addon for the rbtree
1163 */
1164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1165{
1166        /* Service tree is empty */
1167        if (!root->count)
1168                return NULL;
1169
1170        if (!root->left)
1171                root->left = rb_first(&root->rb);
1172
1173        if (root->left)
1174                return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176        return NULL;
1177}
1178
1179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181        if (!root->left)
1182                root->left = rb_first(&root->rb);
1183
1184        if (root->left)
1185                return rb_entry_cfqg(root->left);
1186
1187        return NULL;
1188}
1189
1190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192        rb_erase(n, root);
1193        RB_CLEAR_NODE(n);
1194}
1195
1196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198        if (root->left == n)
1199                root->left = NULL;
1200        rb_erase_init(n, &root->rb);
1201        --root->count;
1202}
1203
1204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
1207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209                  struct request *last)
1210{
1211        struct rb_node *rbnext = rb_next(&last->rb_node);
1212        struct rb_node *rbprev = rb_prev(&last->rb_node);
1213        struct request *next = NULL, *prev = NULL;
1214
1215        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1216
1217        if (rbprev)
1218                prev = rb_entry_rq(rbprev);
1219
1220        if (rbnext)
1221                next = rb_entry_rq(rbnext);
1222        else {
1223                rbnext = rb_first(&cfqq->sort_list);
1224                if (rbnext && rbnext != &last->rb_node)
1225                        next = rb_entry_rq(rbnext);
1226        }
1227
1228        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1229}
1230
1231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232                                      struct cfq_queue *cfqq)
1233{
1234        /*
1235         * just an approximation, should be ok.
1236         */
1237        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1238                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1239}
1240
1241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244        return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250        struct rb_node **node = &st->rb.rb_node;
1251        struct rb_node *parent = NULL;
1252        struct cfq_group *__cfqg;
1253        s64 key = cfqg_key(st, cfqg);
1254        int left = 1;
1255
1256        while (*node != NULL) {
1257                parent = *node;
1258                __cfqg = rb_entry_cfqg(parent);
1259
1260                if (key < cfqg_key(st, __cfqg))
1261                        node = &parent->rb_left;
1262                else {
1263                        node = &parent->rb_right;
1264                        left = 0;
1265                }
1266        }
1267
1268        if (left)
1269                st->left = &cfqg->rb_node;
1270
1271        rb_link_node(&cfqg->rb_node, parent, node);
1272        rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
1275static void
1276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1279
1280        if (cfqg->new_weight) {
1281                cfqg->weight = cfqg->new_weight;
1282                cfqg->new_weight = 0;
1283        }
1284
1285        if (cfqg->new_leaf_weight) {
1286                cfqg->leaf_weight = cfqg->new_leaf_weight;
1287                cfqg->new_leaf_weight = 0;
1288        }
1289}
1290
1291static void
1292cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1293{
1294        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
1295        struct cfq_group *pos = cfqg;
1296        struct cfq_group *parent;
1297        bool propagate;
1298
1299        /* add to the service tree */
1300        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1301
1302        cfq_update_group_weight(cfqg);
1303        __cfq_group_service_tree_add(st, cfqg);
1304
1305        /*
1306         * Activate @cfqg and calculate the portion of vfraction @cfqg is
1307         * entitled to.  vfraction is calculated by walking the tree
1308         * towards the root calculating the fraction it has at each level.
1309         * The compounded ratio is how much vfraction @cfqg owns.
1310         *
1311         * Start with the proportion tasks in this cfqg has against active
1312         * children cfqgs - its leaf_weight against children_weight.
1313         */
1314        propagate = !pos->nr_active++;
1315        pos->children_weight += pos->leaf_weight;
1316        vfr = vfr * pos->leaf_weight / pos->children_weight;
1317
1318        /*
1319         * Compound ->weight walking up the tree.  Both activation and
1320         * vfraction calculation are done in the same loop.  Propagation
1321         * stops once an already activated node is met.  vfraction
1322         * calculation should always continue to the root.
1323         */
1324        while ((parent = cfqg_parent(pos))) {
1325                if (propagate) {
1326                        propagate = !parent->nr_active++;
1327                        parent->children_weight += pos->weight;
1328                }
1329                vfr = vfr * pos->weight / parent->children_weight;
1330                pos = parent;
1331        }
1332
1333        cfqg->vfraction = max_t(unsigned, vfr, 1);
1334}
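/*
 * Example, assuming the parent here is the root group: if this cfqg's
 * leaf_weight ends up as half of its own children_weight and its weight
 * as a quarter of the parent's children_weight, the compounded vfraction
 * is (1 << 12) * 1/2 * 1/4 = 512, i.e. roughly 1/8 of the device time.
 */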
1335
1336static void
1337cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1338{
1339        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1340        struct cfq_group *__cfqg;
1341        struct rb_node *n;
1342
1343        cfqg->nr_cfqq++;
1344        if (!RB_EMPTY_NODE(&cfqg->rb_node))
1345                return;
1346
1347        /*
1348         * Currently put the group at the end. Later implement something
1349         * so that groups get lesser vtime based on their weights, so that
  350         * a group does not lose everything if it was not continuously backlogged.
1351         */
1352        n = rb_last(&st->rb);
1353        if (n) {
1354                __cfqg = rb_entry_cfqg(n);
1355                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1356        } else
1357                cfqg->vdisktime = st->min_vdisktime;
1358        cfq_group_service_tree_add(st, cfqg);
1359}
1360
1361static void
1362cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1363{
1364        struct cfq_group *pos = cfqg;
1365        bool propagate;
1366
1367        /*
1368         * Undo activation from cfq_group_service_tree_add().  Deactivate
1369         * @cfqg and propagate deactivation upwards.
1370         */
1371        propagate = !--pos->nr_active;
1372        pos->children_weight -= pos->leaf_weight;
1373
1374        while (propagate) {
1375                struct cfq_group *parent = cfqg_parent(pos);
1376
1377                /* @pos has 0 nr_active at this point */
1378                WARN_ON_ONCE(pos->children_weight);
1379                pos->vfraction = 0;
1380
1381                if (!parent)
1382                        break;
1383
1384                propagate = !--parent->nr_active;
1385                parent->children_weight -= pos->weight;
1386                pos = parent;
1387        }
1388
1389        /* remove from the service tree */
1390        if (!RB_EMPTY_NODE(&cfqg->rb_node))
1391                cfq_rb_erase(&cfqg->rb_node, st);
1392}
1393
1394static void
1395cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1396{
1397        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1398
1399        BUG_ON(cfqg->nr_cfqq < 1);
1400        cfqg->nr_cfqq--;
1401
1402        /* If there are other cfq queues under this group, don't delete it */
1403        if (cfqg->nr_cfqq)
1404                return;
1405
1406        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1407        cfq_group_service_tree_del(st, cfqg);
1408        cfqg->saved_wl_slice = 0;
1409        cfqg_stats_update_dequeue(cfqg);
1410}
1411
1412static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1413                                                unsigned int *unaccounted_time)
1414{
1415        unsigned int slice_used;
1416
1417        /*
1418         * Queue got expired before even a single request completed or
1419         * got expired immediately after first request completion.
1420         */
1421        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1422                /*
1423                 * Also charge the seek time incurred to the group, otherwise
  424                 * if there are multiple queues in the group, each can dispatch
1425                 * a single request on seeky media and cause lots of seek time
1426                 * and group will never know it.
1427                 */
1428                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429                                        1);
1430        } else {
1431                slice_used = jiffies - cfqq->slice_start;
1432                if (slice_used > cfqq->allocated_slice) {
1433                        *unaccounted_time = slice_used - cfqq->allocated_slice;
1434                        slice_used = cfqq->allocated_slice;
1435                }
1436                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437                        *unaccounted_time += cfqq->slice_start -
1438                                        cfqq->dispatch_start;
1439        }
1440
1441        return slice_used;
1442}
1443
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1445                                struct cfq_queue *cfqq)
1446{
1447        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1448        unsigned int used_sl, charge, unaccounted_sl = 0;
1449        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450                        - cfqg->service_tree_idle.count;
1451        unsigned int vfr;
1452
1453        BUG_ON(nr_sync < 0);
1454        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1455
1456        if (iops_mode(cfqd))
1457                charge = cfqq->slice_dispatch;
1458        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459                charge = cfqq->allocated_slice;
1460
1461        /*
1462         * Can't update vdisktime while on service tree and cfqg->vfraction
1463         * is valid only while on it.  Cache vfr, leave the service tree,
1464         * update vdisktime and go back on.  The re-addition to the tree
1465         * will also update the weights as necessary.
1466         */
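        /*
         * Note: cfqg_scale_charge() divides the charge by the cached
         * vfraction, so a lightly weighted group sees its vdisktime advance
         * quickly and ends up with a proportionally smaller share of
         * service, while a heavily weighted group advances slowly.
         */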
1467        vfr = cfqg->vfraction;
1468        cfq_group_service_tree_del(st, cfqg);
1469        cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1470        cfq_group_service_tree_add(st, cfqg);
1471
1472        /* This group is being expired. Save the context */
1473        if (time_after(cfqd->workload_expires, jiffies)) {
1474                cfqg->saved_wl_slice = cfqd->workload_expires
1475                                                - jiffies;
1476                cfqg->saved_wl_type = cfqd->serving_wl_type;
1477                cfqg->saved_wl_class = cfqd->serving_wl_class;
1478        } else
1479                cfqg->saved_wl_slice = 0;
1480
1481        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482                                        st->min_vdisktime);
1483        cfq_log_cfqq(cfqq->cfqd, cfqq,
1484                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485                     used_sl, cfqq->slice_dispatch, charge,
1486                     iops_mode(cfqd), cfqq->nr_sectors);
1487        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488        cfqg_stats_set_start_empty_time(cfqg);
1489}
1490
1491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500        struct cfq_rb_root *st;
1501        int i, j;
1502
1503        for_each_cfqg_st(cfqg, i, j, st)
1504                *st = CFQ_RB_ROOT;
1505        RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507        cfqg->ttime.last_end_request = jiffies;
1508}
1509
1510#ifdef CONFIG_CFQ_GROUP_IOSCHED
1511static void cfq_pd_init(struct blkcg_gq *blkg)
1512{
1513        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1514
1515        cfq_init_cfqg_base(cfqg);
1516        cfqg->weight = blkg->blkcg->cfq_weight;
1517        cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
1518}
1519
1520static void cfq_pd_offline(struct blkcg_gq *blkg)
1521{
1522        /*
1523         * @blkg is going offline and will be ignored by
1524         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1525         * that they don't get lost.  If IOs complete after this point, the
1526         * stats for them will be lost.  Oh well...
1527         */
1528        cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1529}
1530
1531/* offset delta from cfqg->stats to cfqg->dead_stats */
1532static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1533                                        offsetof(struct cfq_group, stats);
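/*
 * E.g. for off == offsetof(struct cfq_group, stats.sectors), adding the
 * delta yields offsetof(struct cfq_group, dead_stats.sectors), so a single
 * cftype->private offset can address both the live and the dead copy of a
 * counter.
 */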
1534
1535/* to be used by recursive prfill, sums live and dead stats recursively */
1536static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1537{
1538        u64 sum = 0;
1539
1540        sum += blkg_stat_recursive_sum(pd, off);
1541        sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1542        return sum;
1543}
1544
1545/* to be used by recursive prfill, sums live and dead rwstats recursively */
1546static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1547                                                       int off)
1548{
1549        struct blkg_rwstat a, b;
1550
1551        a = blkg_rwstat_recursive_sum(pd, off);
1552        b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1553        blkg_rwstat_merge(&a, &b);
1554        return a;
1555}
1556
1557static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1558{
1559        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1560
1561        cfqg_stats_reset(&cfqg->stats);
1562        cfqg_stats_reset(&cfqg->dead_stats);
1563}
1564
1565/*
1566 * Search for the cfq group the current task belongs to. The request_queue
1567 * lock must be held.
1568 */
1569static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1570                                                struct blkcg *blkcg)
1571{
1572        struct request_queue *q = cfqd->queue;
1573        struct cfq_group *cfqg = NULL;
1574
1575        /* avoid lookup for the common case where there's no blkcg */
1576        if (blkcg == &blkcg_root) {
1577                cfqg = cfqd->root_group;
1578        } else {
1579                struct blkcg_gq *blkg;
1580
1581                blkg = blkg_lookup_create(blkcg, q);
1582                if (!IS_ERR(blkg))
1583                        cfqg = blkg_to_cfqg(blkg);
1584        }
1585
1586        return cfqg;
1587}
1588
1589static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1590{
1591        /* Currently, all async queues are mapped to root group */
1592        if (!cfq_cfqq_sync(cfqq))
1593                cfqg = cfqq->cfqd->root_group;
1594
1595        cfqq->cfqg = cfqg;
1596        /* cfqq reference on cfqg */
1597        cfqg_get(cfqg);
1598}
1599
1600static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1601                                     struct blkg_policy_data *pd, int off)
1602{
1603        struct cfq_group *cfqg = pd_to_cfqg(pd);
1604
1605        if (!cfqg->dev_weight)
1606                return 0;
1607        return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1608}
1609
1610static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1611                                    struct seq_file *sf)
1612{
1613        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1614                          cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1615                          false);
1616        return 0;
1617}
1618
1619static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1620                                          struct blkg_policy_data *pd, int off)
1621{
1622        struct cfq_group *cfqg = pd_to_cfqg(pd);
1623
1624        if (!cfqg->dev_leaf_weight)
1625                return 0;
1626        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1627}
1628
1629static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
1630                                         struct cftype *cft,
1631                                         struct seq_file *sf)
1632{
1633        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1634                          cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
1635                          false);
1636        return 0;
1637}
1638
1639static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1640                            struct seq_file *sf)
1641{
1642        seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1643        return 0;
1644}
1645
1646static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
1647                                 struct seq_file *sf)
1648{
1649        seq_printf(sf, "%u\n",
1650                   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
1651        return 0;
1652}
1653
1654static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1655                                    const char *buf, bool is_leaf_weight)
1656{
1657        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1658        struct blkg_conf_ctx ctx;
1659        struct cfq_group *cfqg;
1660        int ret;
1661
1662        ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1663        if (ret)
1664                return ret;
1665
1666        ret = -EINVAL;
1667        cfqg = blkg_to_cfqg(ctx.blkg);
1668        if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1669                if (!is_leaf_weight) {
1670                        cfqg->dev_weight = ctx.v;
1671                        cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1672                } else {
1673                        cfqg->dev_leaf_weight = ctx.v;
1674                        cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1675                }
1676                ret = 0;
1677        }
1678
1679        blkg_conf_finish(&ctx);
1680        return ret;
1681}
1682
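/*
 * Entry points for the weight_device cgroup files.  Userspace writes
 * "MAJ:MIN weight" (e.g. "8:16 500" -- values here are purely
 * illustrative); writing a weight of 0 clears the per-device setting and
 * the group falls back to the blkcg-wide (leaf_)weight.
 */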
1683static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1684                                  const char *buf)
1685{
1686        return __cfqg_set_weight_device(cgrp, cft, buf, false);
1687}
1688
1689static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
1690                                       const char *buf)
1691{
1692        return __cfqg_set_weight_device(cgrp, cft, buf, true);
1693}
1694
1695static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
1696                            bool is_leaf_weight)
1697{
1698        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1699        struct blkcg_gq *blkg;
1700
1701        if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1702                return -EINVAL;
1703
1704        spin_lock_irq(&blkcg->lock);
1705
1706        if (!is_leaf_weight)
1707                blkcg->cfq_weight = val;
1708        else
1709                blkcg->cfq_leaf_weight = val;
1710
1711        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1712                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1713
1714                if (!cfqg)
1715                        continue;
1716
1717                if (!is_leaf_weight) {
1718                        if (!cfqg->dev_weight)
1719                                cfqg->new_weight = blkcg->cfq_weight;
1720                } else {
1721                        if (!cfqg->dev_leaf_weight)
1722                                cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1723                }
1724        }
1725
1726        spin_unlock_irq(&blkcg->lock);
1727        return 0;
1728}
1729
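/*
 * Entry points for the plain weight / leaf_weight cgroup files.  The value
 * written must lie in [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX] and becomes the new
 * default for every group in the blkcg that has no per-device override.
 */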
1730static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1731{
1732        return __cfq_set_weight(cgrp, cft, val, false);
1733}
1734
1735static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1736{
1737        return __cfq_set_weight(cgrp, cft, val, true);
1738}
1739
1740static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1741                           struct seq_file *sf)
1742{
1743        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1744
1745        blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1746                          cft->private, false);
1747        return 0;
1748}
1749
1750static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1751                             struct seq_file *sf)
1752{
1753        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1754
1755        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1756                          cft->private, true);
1757        return 0;
1758}
1759
1760static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1761                                      struct blkg_policy_data *pd, int off)
1762{
1763        u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1764
1765        return __blkg_prfill_u64(sf, pd, sum);
1766}
1767
1768static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1769                                        struct blkg_policy_data *pd, int off)
1770{
1771        struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1772
1773        return __blkg_prfill_rwstat(sf, pd, &sum);
1774}
1775
1776static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
1777                                     struct seq_file *sf)
1778{
1779        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1780
1781        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
1782                          &blkcg_policy_cfq, cft->private, false);
1783        return 0;
1784}
1785
1786static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
1787                                       struct seq_file *sf)
1788{
1789        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1790
1791        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
1792                          &blkcg_policy_cfq, cft->private, true);
1793        return 0;
1794}
1795
1796#ifdef CONFIG_DEBUG_BLK_CGROUP
1797static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1798                                      struct blkg_policy_data *pd, int off)
1799{
1800        struct cfq_group *cfqg = pd_to_cfqg(pd);
1801        u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1802        u64 v = 0;
1803
1804        if (samples) {
1805                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1806                do_div(v, samples);
1807        }
1808        __blkg_prfill_u64(sf, pd, v);
1809        return 0;
1810}
1811
1812/* print avg_queue_size */
1813static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1814                                     struct seq_file *sf)
1815{
1816        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1817
1818        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1819                          &blkcg_policy_cfq, 0, false);
1820        return 0;
1821}
1822#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1823
1824static struct cftype cfq_blkcg_files[] = {
1825        /* on root, weight is mapped to leaf_weight */
1826        {
1827                .name = "weight_device",
1828                .flags = CFTYPE_ONLY_ON_ROOT,
1829                .read_seq_string = cfqg_print_leaf_weight_device,
1830                .write_string = cfqg_set_leaf_weight_device,
1831                .max_write_len = 256,
1832        },
1833        {
1834                .name = "weight",
1835                .flags = CFTYPE_ONLY_ON_ROOT,
1836                .read_seq_string = cfq_print_leaf_weight,
1837                .write_u64 = cfq_set_leaf_weight,
1838        },
1839
1840        /* no such mapping necessary for !roots */
1841        {
1842                .name = "weight_device",
1843                .flags = CFTYPE_NOT_ON_ROOT,
1844                .read_seq_string = cfqg_print_weight_device,
1845                .write_string = cfqg_set_weight_device,
1846                .max_write_len = 256,
1847        },
1848        {
1849                .name = "weight",
1850                .flags = CFTYPE_NOT_ON_ROOT,
1851                .read_seq_string = cfq_print_weight,
1852                .write_u64 = cfq_set_weight,
1853        },
1854
1855        {
1856                .name = "leaf_weight_device",
1857                .read_seq_string = cfqg_print_leaf_weight_device,
1858                .write_string = cfqg_set_leaf_weight_device,
1859                .max_write_len = 256,
1860        },
1861        {
1862                .name = "leaf_weight",
1863                .read_seq_string = cfq_print_leaf_weight,
1864                .write_u64 = cfq_set_leaf_weight,
1865        },
1866
1867        /* statistics which cover only the tasks in the cfqg */
1868        {
1869                .name = "time",
1870                .private = offsetof(struct cfq_group, stats.time),
1871                .read_seq_string = cfqg_print_stat,
1872        },
1873        {
1874                .name = "sectors",
1875                .private = offsetof(struct cfq_group, stats.sectors),
1876                .read_seq_string = cfqg_print_stat,
1877        },
1878        {
1879                .name = "io_service_bytes",
1880                .private = offsetof(struct cfq_group, stats.service_bytes),
1881                .read_seq_string = cfqg_print_rwstat,
1882        },
1883        {
1884                .name = "io_serviced",
1885                .private = offsetof(struct cfq_group, stats.serviced),
1886                .read_seq_string = cfqg_print_rwstat,
1887        },
1888        {
1889                .name = "io_service_time",
1890                .private = offsetof(struct cfq_group, stats.service_time),
1891                .read_seq_string = cfqg_print_rwstat,
1892        },
1893        {
1894                .name = "io_wait_time",
1895                .private = offsetof(struct cfq_group, stats.wait_time),
1896                .read_seq_string = cfqg_print_rwstat,
1897        },
1898        {
1899                .name = "io_merged",
1900                .private = offsetof(struct cfq_group, stats.merged),
1901                .read_seq_string = cfqg_print_rwstat,
1902        },
1903        {
1904                .name = "io_queued",
1905                .private = offsetof(struct cfq_group, stats.queued),
1906                .read_seq_string = cfqg_print_rwstat,
1907        },
1908
1909        /* the same statistics, but covering the cfqg and its descendants */
1910        {
1911                .name = "time_recursive",
1912                .private = offsetof(struct cfq_group, stats.time),
1913                .read_seq_string = cfqg_print_stat_recursive,
1914        },
1915        {
1916                .name = "sectors_recursive",
1917                .private = offsetof(struct cfq_group, stats.sectors),
1918                .read_seq_string = cfqg_print_stat_recursive,
1919        },
1920        {
1921                .name = "io_service_bytes_recursive",
1922                .private = offsetof(struct cfq_group, stats.service_bytes),
1923                .read_seq_string = cfqg_print_rwstat_recursive,
1924        },
1925        {
1926                .name = "io_serviced_recursive",
1927                .private = offsetof(struct cfq_group, stats.serviced),
1928                .read_seq_string = cfqg_print_rwstat_recursive,
1929        },
1930        {
1931                .name = "io_service_time_recursive",
1932                .private = offsetof(struct cfq_group, stats.service_time),
1933                .read_seq_string = cfqg_print_rwstat_recursive,
1934        },
1935        {
1936                .name = "io_wait_time_recursive",
1937                .private = offsetof(struct cfq_group, stats.wait_time),
1938                .read_seq_string = cfqg_print_rwstat_recursive,
1939        },
1940        {
1941                .name = "io_merged_recursive",
1942                .private = offsetof(struct cfq_group, stats.merged),
1943                .read_seq_string = cfqg_print_rwstat_recursive,
1944        },
1945        {
1946                .name = "io_queued_recursive",
1947                .private = offsetof(struct cfq_group, stats.queued),
1948                .read_seq_string = cfqg_print_rwstat_recursive,
1949        },
1950#ifdef CONFIG_DEBUG_BLK_CGROUP
1951        {
1952                .name = "avg_queue_size",
1953                .read_seq_string = cfqg_print_avg_queue_size,
1954        },
1955        {
1956                .name = "group_wait_time",
1957                .private = offsetof(struct cfq_group, stats.group_wait_time),
1958                .read_seq_string = cfqg_print_stat,
1959        },
1960        {
1961                .name = "idle_time",
1962                .private = offsetof(struct cfq_group, stats.idle_time),
1963                .read_seq_string = cfqg_print_stat,
1964        },
1965        {
1966                .name = "empty_time",
1967                .private = offsetof(struct cfq_group, stats.empty_time),
1968                .read_seq_string = cfqg_print_stat,
1969        },
1970        {
1971                .name = "dequeue",
1972                .private = offsetof(struct cfq_group, stats.dequeue),
1973                .read_seq_string = cfqg_print_stat,
1974        },
1975        {
1976                .name = "unaccounted_time",
1977                .private = offsetof(struct cfq_group, stats.unaccounted_time),
1978                .read_seq_string = cfqg_print_stat,
1979        },
1980#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1981        { }     /* terminate */
1982};
1983#else /* GROUP_IOSCHED */
1984static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1985                                                struct blkcg *blkcg)
1986{
1987        return cfqd->root_group;
1988}
1989
1990static inline void
1991cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1992        cfqq->cfqg = cfqg;
1993}
1994
1995#endif /* GROUP_IOSCHED */
1996
1997/*
1998 * The cfqd->service_trees holds all pending cfq_queue's that have
1999 * requests waiting to be processed. It is sorted in the order that
2000 * we will service the queues.
2001 */
2002static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2003                                 bool add_front)
2004{
2005        struct rb_node **p, *parent;
2006        struct cfq_queue *__cfqq;
2007        unsigned long rb_key;
2008        struct cfq_rb_root *st;
2009        int left;
2010        int new_cfqq = 1;
2011
2012        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2013        if (cfq_class_idle(cfqq)) {
2014                rb_key = CFQ_IDLE_DELAY;
2015                parent = rb_last(&st->rb);
2016                if (parent && parent != &cfqq->rb_node) {
2017                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2018                        rb_key += __cfqq->rb_key;
2019                } else
2020                        rb_key += jiffies;
2021        } else if (!add_front) {
2022                /*
2023                 * Get our rb key offset. Subtract any residual slice
2024                 * value carried from last service. A negative resid
2025                 * count indicates slice overrun, and this should position
2026                 * the next service time further away in the tree.
2027                 */
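                /*
                 * For instance, a queue that overran its previous slice by
                 * 10 jiffies carries slice_resid == -10, so the subtraction
                 * below pushes its rb_key 10 jiffies further into the
                 * future.
                 */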
2028                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2029                rb_key -= cfqq->slice_resid;
2030                cfqq->slice_resid = 0;
2031        } else {
2032                rb_key = -HZ;
2033                __cfqq = cfq_rb_first(st);
2034                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2035        }
2036
2037        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2038                new_cfqq = 0;
2039                /*
2040                 * same position, nothing more to do
2041                 */
2042                if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2043                        return;
2044
2045                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2046                cfqq->service_tree = NULL;
2047        }
2048
2049        left = 1;
2050        parent = NULL;
2051        cfqq->service_tree = st;
2052        p = &st->rb.rb_node;
2053        while (*p) {
2054                parent = *p;
2055                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2056
2057                /*
2058                 * sort by key, which represents service time.
2059                 */
2060                if (time_before(rb_key, __cfqq->rb_key))
2061                        p = &parent->rb_left;
2062                else {
2063                        p = &parent->rb_right;
2064                        left = 0;
2065                }
2066        }
2067
2068        if (left)
2069                st->left = &cfqq->rb_node;
2070
2071        cfqq->rb_key = rb_key;
2072        rb_link_node(&cfqq->rb_node, parent, p);
2073        rb_insert_color(&cfqq->rb_node, &st->rb);
2074        st->count++;
2075        if (add_front || !new_cfqq)
2076                return;
2077        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2078}
2079
2080static struct cfq_queue *
2081cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2082                     sector_t sector, struct rb_node **ret_parent,
2083                     struct rb_node ***rb_link)
2084{
2085        struct rb_node **p, *parent;
2086        struct cfq_queue *cfqq = NULL;
2087
2088        parent = NULL;
2089        p = &root->rb_node;
2090        while (*p) {
2091                struct rb_node **n;
2092
2093                parent = *p;
2094                cfqq = rb_entry(parent, struct cfq_queue, p_node);
2095
2096                /*
2097                 * Sort strictly based on sector.  Smallest to the left,
2098                 * largest to the right.
2099                 */
2100                if (sector > blk_rq_pos(cfqq->next_rq))
2101                        n = &(*p)->rb_right;
2102                else if (sector < blk_rq_pos(cfqq->next_rq))
2103                        n = &(*p)->rb_left;
2104                else
2105                        break;
2106                p = n;
2107                cfqq = NULL;
2108        }
2109
2110        *ret_parent = parent;
2111        if (rb_link)
2112                *rb_link = p;
2113        return cfqq;
2114}
2115
2116static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2117{
2118        struct rb_node **p, *parent;
2119        struct cfq_queue *__cfqq;
2120
2121        if (cfqq->p_root) {
2122                rb_erase(&cfqq->p_node, cfqq->p_root);
2123                cfqq->p_root = NULL;
2124        }
2125
2126        if (cfq_class_idle(cfqq))
2127                return;
2128        if (!cfqq->next_rq)
2129                return;
2130
2131        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2132        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2133                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
2134        if (!__cfqq) {
2135                rb_link_node(&cfqq->p_node, parent, p);
2136                rb_insert_color(&cfqq->p_node, cfqq->p_root);
2137        } else
2138                cfqq->p_root = NULL;
2139}
2140
2141/*
2142 * Update cfqq's position in the service tree.
2143 */
2144static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2145{
2146        /*
2147         * Resorting requires the cfqq to be on the RR list already.
2148         */
2149        if (cfq_cfqq_on_rr(cfqq)) {
2150                cfq_service_tree_add(cfqd, cfqq, 0);
2151                cfq_prio_tree_add(cfqd, cfqq);
2152        }
2153}
2154
2155/*
2156 * add to busy list of queues for service, trying to be fair in ordering
2157 * the pending list according to last request service
2158 */
2159static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2160{
2161        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2162        BUG_ON(cfq_cfqq_on_rr(cfqq));
2163        cfq_mark_cfqq_on_rr(cfqq);
2164        cfqd->busy_queues++;
2165        if (cfq_cfqq_sync(cfqq))
2166                cfqd->busy_sync_queues++;
2167
2168        cfq_resort_rr_list(cfqd, cfqq);
2169}
2170
2171/*
2172 * Called when the cfqq no longer has requests pending, remove it from
2173 * the service tree.
2174 */
2175static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2176{
2177        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2178        BUG_ON(!cfq_cfqq_on_rr(cfqq));
2179        cfq_clear_cfqq_on_rr(cfqq);
2180
2181        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2182                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2183                cfqq->service_tree = NULL;
2184        }
2185        if (cfqq->p_root) {
2186                rb_erase(&cfqq->p_node, cfqq->p_root);
2187                cfqq->p_root = NULL;
2188        }
2189
2190        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2191        BUG_ON(!cfqd->busy_queues);
2192        cfqd->busy_queues--;
2193        if (cfq_cfqq_sync(cfqq))
2194                cfqd->busy_sync_queues--;
2195}
2196
2197/*
2198 * rb tree support functions
2199 */
2200static void cfq_del_rq_rb(struct request *rq)
2201{
2202        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2203        const int sync = rq_is_sync(rq);
2204
2205        BUG_ON(!cfqq->queued[sync]);
2206        cfqq->queued[sync]--;
2207
2208        elv_rb_del(&cfqq->sort_list, rq);
2209
2210        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2211                /*
2212                 * Queue will be deleted from service tree when we actually
2213                 * expire it later. Right now just remove it from prio tree
2214                 * as it is empty.
2215                 */
2216                if (cfqq->p_root) {
2217                        rb_erase(&cfqq->p_node, cfqq->p_root);
2218                        cfqq->p_root = NULL;
2219                }
2220        }
2221}
2222
2223static void cfq_add_rq_rb(struct request *rq)
2224{
2225        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2226        struct cfq_data *cfqd = cfqq->cfqd;
2227        struct request *prev;
2228
2229        cfqq->queued[rq_is_sync(rq)]++;
2230
2231        elv_rb_add(&cfqq->sort_list, rq);
2232
2233        if (!cfq_cfqq_on_rr(cfqq))
2234                cfq_add_cfqq_rr(cfqd, cfqq);
2235
2236        /*
2237         * check if this request is a better next-serve candidate
2238         */
2239        prev = cfqq->next_rq;
2240        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2241
2242        /*
2243         * adjust priority tree position, if ->next_rq changes
2244         */
2245        if (prev != cfqq->next_rq)
2246                cfq_prio_tree_add(cfqd, cfqq);
2247
2248        BUG_ON(!cfqq->next_rq);
2249}
2250
2251static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2252{
2253        elv_rb_del(&cfqq->sort_list, rq);
2254        cfqq->queued[rq_is_sync(rq)]--;
2255        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2256        cfq_add_rq_rb(rq);
2257        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2258                                 rq->cmd_flags);
2259}
2260
2261static struct request *
2262cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2263{
2264        struct task_struct *tsk = current;
2265        struct cfq_io_cq *cic;
2266        struct cfq_queue *cfqq;
2267
2268        cic = cfq_cic_lookup(cfqd, tsk->io_context);
2269        if (!cic)
2270                return NULL;
2271
2272        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2273        if (cfqq)
2274                return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2275
2276        return NULL;
2277}
2278
2279static void cfq_activate_request(struct request_queue *q, struct request *rq)
2280{
2281        struct cfq_data *cfqd = q->elevator->elevator_data;
2282
2283        cfqd->rq_in_driver++;
2284        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2285                                                cfqd->rq_in_driver);
2286
2287        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2288}
2289
2290static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2291{
2292        struct cfq_data *cfqd = q->elevator->elevator_data;
2293
2294        WARN_ON(!cfqd->rq_in_driver);
2295        cfqd->rq_in_driver--;
2296        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2297                                                cfqd->rq_in_driver);
2298}
2299
2300static void cfq_remove_request(struct request *rq)
2301{
2302        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2303
2304        if (cfqq->next_rq == rq)
2305                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2306
2307        list_del_init(&rq->queuelist);
2308        cfq_del_rq_rb(rq);
2309
2310        cfqq->cfqd->rq_queued--;
2311        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2312        if (rq->cmd_flags & REQ_PRIO) {
2313                WARN_ON(!cfqq->prio_pending);
2314                cfqq->prio_pending--;
2315        }
2316}
2317
2318static int cfq_merge(struct request_queue *q, struct request **req,
2319                     struct bio *bio)
2320{
2321        struct cfq_data *cfqd = q->elevator->elevator_data;
2322        struct request *__rq;
2323
2324        __rq = cfq_find_rq_fmerge(cfqd, bio);
2325        if (__rq && elv_rq_merge_ok(__rq, bio)) {
2326                *req = __rq;
2327                return ELEVATOR_FRONT_MERGE;
2328        }
2329
2330        return ELEVATOR_NO_MERGE;
2331}
2332
2333static void cfq_merged_request(struct request_queue *q, struct request *req,
2334                               int type)
2335{
2336        if (type == ELEVATOR_FRONT_MERGE) {
2337                struct cfq_queue *cfqq = RQ_CFQQ(req);
2338
2339                cfq_reposition_rq_rb(cfqq, req);
2340        }
2341}
2342
2343static void cfq_bio_merged(struct request_queue *q, struct request *req,
2344                                struct bio *bio)
2345{
2346        cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2347}
2348
2349static void
2350cfq_merged_requests(struct request_queue *q, struct request *rq,
2351                    struct request *next)
2352{
2353        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2354        struct cfq_data *cfqd = q->elevator->elevator_data;
2355
2356        /*
2357         * reposition in fifo if next is older than rq
2358         */
2359        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2360            time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
2361            cfqq == RQ_CFQQ(next)) {
2362                list_move(&rq->queuelist, &next->queuelist);
2363                rq_set_fifo_time(rq, rq_fifo_time(next));
2364        }
2365
2366        if (cfqq->next_rq == next)
2367                cfqq->next_rq = rq;
2368        cfq_remove_request(next);
2369        cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2370
2371        cfqq = RQ_CFQQ(next);
2372        /*
2373         * All requests of this queue have been merged into other queues, so
2374         * delete it from the service tree. If it's the active_queue,
2375         * cfq_dispatch_requests() will choose whether to expire it or keep idling.
2376         */
2377        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2378            cfqq != cfqd->active_queue)
2379                cfq_del_cfqq_rr(cfqd, cfqq);
2380}
2381
2382static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2383                           struct bio *bio)
2384{
2385        struct cfq_data *cfqd = q->elevator->elevator_data;
2386        struct cfq_io_cq *cic;
2387        struct cfq_queue *cfqq;
2388
2389        /*
2390         * Disallow merge of a sync bio into an async request.
2391         */
2392        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2393                return false;
2394
2395        /*
2396         * Lookup the cfqq that this bio will be queued with and allow
2397         * merge only if rq is queued there.
2398         */
2399        cic = cfq_cic_lookup(cfqd, current->io_context);
2400        if (!cic)
2401                return false;
2402
2403        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2404        return cfqq == RQ_CFQQ(rq);
2405}
2406
2407static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2408{
2409        del_timer(&cfqd->idle_slice_timer);
2410        cfqg_stats_update_idle_time(cfqq->cfqg);
2411}
2412
2413static void __cfq_set_active_queue(struct cfq_data *cfqd,
2414                                   struct cfq_queue *cfqq)
2415{
2416        if (cfqq) {
2417                cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2418                                cfqd->serving_wl_class, cfqd->serving_wl_type);
2419                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2420                cfqq->slice_start = 0;
2421                cfqq->dispatch_start = jiffies;
2422                cfqq->allocated_slice = 0;
2423                cfqq->slice_end = 0;
2424                cfqq->slice_dispatch = 0;
2425                cfqq->nr_sectors = 0;
2426
2427                cfq_clear_cfqq_wait_request(cfqq);
2428                cfq_clear_cfqq_must_dispatch(cfqq);
2429                cfq_clear_cfqq_must_alloc_slice(cfqq);
2430                cfq_clear_cfqq_fifo_expire(cfqq);
2431                cfq_mark_cfqq_slice_new(cfqq);
2432
2433                cfq_del_timer(cfqd, cfqq);
2434        }
2435
2436        cfqd->active_queue = cfqq;
2437}
2438
2439/*
2440 * current cfqq expired its slice (or was too idle), select new one
2441 */
2442static void
2443__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2444                    bool timed_out)
2445{
2446        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2447
2448        if (cfq_cfqq_wait_request(cfqq))
2449                cfq_del_timer(cfqd, cfqq);
2450
2451        cfq_clear_cfqq_wait_request(cfqq);
2452        cfq_clear_cfqq_wait_busy(cfqq);
2453
2454        /*
2455         * If this cfqq is shared between multiple processes, check to
2456         * make sure that those processes are still issuing I/Os within
2457         * the mean seek distance.  If not, it may be time to break the
2458         * queues apart again.
2459         */
2460        if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2461                cfq_mark_cfqq_split_coop(cfqq);
2462
2463        /*
2464         * store what was left of this slice, if the queue idled/timed out
2465         */
2466        if (timed_out) {
2467                if (cfq_cfqq_slice_new(cfqq))
2468                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2469                else
2470                        cfqq->slice_resid = cfqq->slice_end - jiffies;
2471                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2472        }
2473
2474        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2475
2476        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2477                cfq_del_cfqq_rr(cfqd, cfqq);
2478
2479        cfq_resort_rr_list(cfqd, cfqq);
2480
2481        if (cfqq == cfqd->active_queue)
2482                cfqd->active_queue = NULL;
2483
2484        if (cfqd->active_cic) {
2485                put_io_context(cfqd->active_cic->icq.ioc);
2486                cfqd->active_cic = NULL;
2487        }
2488}
2489
2490static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2491{
2492        struct cfq_queue *cfqq = cfqd->active_queue;
2493
2494        if (cfqq)
2495                __cfq_slice_expired(cfqd, cfqq, timed_out);
2496}
2497
2498/*
2499 * Get next queue for service. Unless we have a queue preemption,
2500 * we'll simply select the first cfqq in the service tree.
2501 */
2502static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2503{
2504        struct cfq_rb_root *st = st_for(cfqd->serving_group,
2505                        cfqd->serving_wl_class, cfqd->serving_wl_type);
2506
2507        if (!cfqd->rq_queued)
2508                return NULL;
2509
2510        /* There is nothing to dispatch */
2511        if (!st)
2512                return NULL;
2513        if (RB_EMPTY_ROOT(&st->rb))
2514                return NULL;
2515        return cfq_rb_first(st);
2516}
2517
2518static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2519{
2520        struct cfq_group *cfqg;
2521        struct cfq_queue *cfqq;
2522        int i, j;
2523        struct cfq_rb_root *st;
2524
2525        if (!cfqd->rq_queued)
2526                return NULL;
2527
2528        cfqg = cfq_get_next_cfqg(cfqd);
2529        if (!cfqg)
2530                return NULL;
2531
2532        for_each_cfqg_st(cfqg, i, j, st)
2533                if ((cfqq = cfq_rb_first(st)) != NULL)
2534                        return cfqq;
2535        return NULL;
2536}
2537
2538/*
2539 * Get and set a new active queue for service.
2540 */
2541static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2542                                              struct cfq_queue *cfqq)
2543{
2544        if (!cfqq)
2545                cfqq = cfq_get_next_queue(cfqd);
2546
2547        __cfq_set_active_queue(cfqd, cfqq);
2548        return cfqq;
2549}
2550
2551static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2552                                          struct request *rq)
2553{
2554        if (blk_rq_pos(rq) >= cfqd->last_position)
2555                return blk_rq_pos(rq) - cfqd->last_position;
2556        else
2557                return cfqd->last_position - blk_rq_pos(rq);
2558}
2559
2560static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2561                               struct request *rq)
2562{
2563        return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2564}
2565
2566static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2567                                    struct cfq_queue *cur_cfqq)
2568{
2569        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2570        struct rb_node *parent, *node;
2571        struct cfq_queue *__cfqq;
2572        sector_t sector = cfqd->last_position;
2573
2574        if (RB_EMPTY_ROOT(root))
2575                return NULL;
2576
2577        /*
2578         * First, if we find a request starting at the end of the last
2579         * request, choose it.
2580         */
2581        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2582        if (__cfqq)
2583                return __cfqq;
2584
2585        /*
2586         * If the exact sector wasn't found, the parent of the NULL leaf
2587         * will contain the closest sector.
2588         */
2589        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2590        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2591                return __cfqq;
2592
2593        if (blk_rq_pos(__cfqq->next_rq) < sector)
2594                node = rb_next(&__cfqq->p_node);
2595        else
2596                node = rb_prev(&__cfqq->p_node);
2597        if (!node)
2598                return NULL;
2599
2600        __cfqq = rb_entry(node, struct cfq_queue, p_node);
2601        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2602                return __cfqq;
2603
2604        return NULL;
2605}
2606
2607/*
2608 * cfqd - obvious
2609 * cur_cfqq - passed in so that we don't decide that the current queue is
2610 *            closely cooperating with itself.
2611 *
2612 * So, basically we're assuming that cur_cfqq has dispatched at least
2613 * one request, and that cfqd->last_position reflects a position on the disk
2614 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2615 * assumption.
2616 */
2617static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2618                                              struct cfq_queue *cur_cfqq)
2619{
2620        struct cfq_queue *cfqq;
2621
2622        if (cfq_class_idle(cur_cfqq))
2623                return NULL;
2624        if (!cfq_cfqq_sync(cur_cfqq))
2625                return NULL;
2626        if (CFQQ_SEEKY(cur_cfqq))
2627                return NULL;
2628
2629        /*
2630         * Don't search priority tree if it's the only queue in the group.
2631         */
2632        if (cur_cfqq->cfqg->nr_cfqq == 1)
2633                return NULL;
2634
2635        /*
2636         * We should notice if some of the queues are cooperating, e.g.
2637         * working closely on the same area of the disk. In that case,
2638         * we can group them together and avoid wasting time idling.
2639         */
2640        cfqq = cfqq_close(cfqd, cur_cfqq);
2641        if (!cfqq)
2642                return NULL;
2643
2644        /* If new queue belongs to different cfq_group, don't choose it */
2645        if (cur_cfqq->cfqg != cfqq->cfqg)
2646                return NULL;
2647
2648        /*
2649         * It only makes sense to merge sync queues.
2650         */
2651        if (!cfq_cfqq_sync(cfqq))
2652                return NULL;
2653        if (CFQQ_SEEKY(cfqq))
2654                return NULL;
2655
2656        /*
2657         * Do not merge queues of different priority classes
2658         */
2659        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2660                return NULL;
2661
2662        return cfqq;
2663}
2664
2665/*
2666 * Determine whether we should enforce idle window for this queue.
2667 */
2668
2669static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2670{
2671        enum wl_class_t wl_class = cfqq_class(cfqq);
2672        struct cfq_rb_root *st = cfqq->service_tree;
2673
2674        BUG_ON(!st);
2675        BUG_ON(!st->count);
2676
2677        if (!cfqd->cfq_slice_idle)
2678                return false;
2679
2680        /* We never do for idle class queues. */
2681        if (wl_class == IDLE_WORKLOAD)
2682                return false;
2683
2684        /* We do for queues that were marked with idle window flag. */
2685        if (cfq_cfqq_idle_window(cfqq) &&
2686           !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2687                return true;
2688
2689        /*
2690         * Otherwise, we only idle if the queue is the last one
2691         * in its service tree.
2692         */
2693        if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2694           !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2695                return true;
2696        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2697        return false;
2698}
2699
2700static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2701{
2702        struct cfq_queue *cfqq = cfqd->active_queue;
2703        struct cfq_io_cq *cic;
2704        unsigned long sl, group_idle = 0;
2705
2706        /*
2707         * SSD device without seek penalty, disable idling. But only do so
2708         * for devices that support queuing, otherwise we still have a problem
2709         * with sync vs async workloads.
2710         */
2711        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2712                return;
2713
2714        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2715        WARN_ON(cfq_cfqq_slice_new(cfqq));
2716
2717        /*
2718         * idle is disabled, either manually or by past process history
2719         */
2720        if (!cfq_should_idle(cfqd, cfqq)) {
2721                /* no queue idling. Check for group idling */
2722                if (cfqd->cfq_group_idle)
2723                        group_idle = cfqd->cfq_group_idle;
2724                else
2725                        return;
2726        }
2727
2728        /*
2729         * still active requests from this queue, don't idle
2730         */
2731        if (cfqq->dispatched)
2732                return;
2733
2734        /*
2735         * task has exited, don't wait
2736         */
2737        cic = cfqd->active_cic;
2738        if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2739                return;
2740
2741        /*
2742         * If our average think time is larger than the remaining time
2743         * slice, then don't idle. This avoids overrunning the allotted
2744         * time slice.
2745         */
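        /*
         * E.g. with 8 jiffies left in the slice and a mean thinktime of 12
         * jiffies (illustrative numbers), arming the idle timer would almost
         * certainly overrun the slice, so we skip idling below.
         */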
2746        if (sample_valid(cic->ttime.ttime_samples) &&
2747            (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2748                cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2749                             cic->ttime.ttime_mean);
2750                return;
2751        }
2752
2753        /* There are other queues in the group, don't do group idle */
2754        if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2755                return;
2756
2757        cfq_mark_cfqq_wait_request(cfqq);
2758
2759        if (group_idle)
2760                sl = cfqd->cfq_group_idle;
2761        else
2762                sl = cfqd->cfq_slice_idle;
2763
2764        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2765        cfqg_stats_set_start_idle_time(cfqq->cfqg);
2766        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2767                        group_idle ? 1 : 0);
2768}
2769
2770/*
2771 * Move request from internal lists to the request queue dispatch list.
2772 */
2773static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2774{
2775        struct cfq_data *cfqd = q->elevator->elevator_data;
2776        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2777
2778        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2779
2780        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2781        cfq_remove_request(rq);
2782        cfqq->dispatched++;
2783        (RQ_CFQG(rq))->dispatched++;
2784        elv_dispatch_sort(q, rq);
2785
2786        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2787        cfqq->nr_sectors += blk_rq_sectors(rq);
2788        cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2789}
2790
2791/*
2792 * return expired entry, or NULL to just start from scratch in rbtree
2793 */
2794static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2795{
2796        struct request *rq = NULL;
2797
2798        if (cfq_cfqq_fifo_expire(cfqq))
2799                return NULL;
2800
2801        cfq_mark_cfqq_fifo_expire(cfqq);
2802
2803        if (list_empty(&cfqq->fifo))
2804                return NULL;
2805
2806        rq = rq_entry_fifo(cfqq->fifo.next);
2807        if (time_before(jiffies, rq_fifo_time(rq)))
2808                rq = NULL;
2809
2810        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2811        return rq;
2812}
2813
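/*
 * Illustrative example: with the default cfq_slice_async_rq of 2 and an
 * ioprio of 4, the cap below works out to 2 * 2 * (IOPRIO_BE_NR - 4) = 16
 * requests (assuming IOPRIO_BE_NR == 8).
 */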
2814static inline int
2815cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2816{
2817        const int base_rq = cfqd->cfq_slice_async_rq;
2818
2819        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2820
2821        return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2822}
2823
2824/*
2825 * Must be called with the queue_lock held.
2826 */
2827static int cfqq_process_refs(struct cfq_queue *cfqq)
2828{
2829        int process_refs, io_refs;
2830
2831        io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2832        process_refs = cfqq->ref - io_refs;
2833        BUG_ON(process_refs < 0);
2834        return process_refs;
2835}
2836
2837static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2838{
2839        int process_refs, new_process_refs;
2840        struct cfq_queue *__cfqq;
2841
2842        /*
2843         * If there are no process references on the new_cfqq, then it is
2844         * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2845         * chain may have dropped their last reference (not just their
2846         * last process reference).
2847         */
2848        if (!cfqq_process_refs(new_cfqq))
2849                return;
2850
2851        /* Avoid a circular list and skip interim queue merges */
2852        while ((__cfqq = new_cfqq->new_cfqq)) {
2853                if (__cfqq == cfqq)
2854                        return;
2855                new_cfqq = __cfqq;
2856        }
2857
2858        process_refs = cfqq_process_refs(cfqq);
2859        new_process_refs = cfqq_process_refs(new_cfqq);
2860        /*
2861         * If the process for the cfqq has gone away, there is no
2862         * sense in merging the queues.
2863         */
2864        if (process_refs == 0 || new_process_refs == 0)
2865                return;
2866
2867        /*
2868         * Merge in the direction of the lesser amount of work.
2869         */
2870        if (new_process_refs >= process_refs) {
2871                cfqq->new_cfqq = new_cfqq;
2872                new_cfqq->ref += process_refs;
2873        } else {
2874                new_cfqq->new_cfqq = cfqq;
2875                cfqq->ref += new_process_refs;
2876        }
2877}
2878
2879static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2880                        struct cfq_group *cfqg, enum wl_class_t wl_class)
2881{
2882        struct cfq_queue *queue;
2883        int i;
2884        bool key_valid = false;
2885        unsigned long lowest_key = 0;
2886        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2887
2888        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2889                /* select the one with lowest rb_key */
2890                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2891                if (queue &&
2892                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2893                        lowest_key = queue->rb_key;
2894                        cur_best = i;
2895                        key_valid = true;
2896                }
2897        }
2898
2899        return cur_best;
2900}
2901
2902static void
2903choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2904{
2905        unsigned slice;
2906        unsigned count;
2907        struct cfq_rb_root *st;
2908        unsigned group_slice;
2909        enum wl_class_t original_class = cfqd->serving_wl_class;
2910
2911        /* Choose next priority. RT > BE > IDLE */
2912        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2913                cfqd->serving_wl_class = RT_WORKLOAD;
2914        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2915                cfqd->serving_wl_class = BE_WORKLOAD;
2916        else {
2917                cfqd->serving_wl_class = IDLE_WORKLOAD;
2918                cfqd->workload_expires = jiffies + 1;
2919                return;
2920        }
2921
2922        if (original_class != cfqd->serving_wl_class)
2923                goto new_workload;
2924
2925        /*
2926         * For RT and BE, we have to choose also the type
2927         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2928         * expiration time
2929         */
2930        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2931        count = st->count;
2932
2933        /*
2934         * check workload expiration, and that we still have other queues ready
2935         */
2936        if (count && !time_after(jiffies, cfqd->workload_expires))
2937                return;
2938
2939new_workload:
2940        /* otherwise select new workload type */
2941        cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2942                                        cfqd->serving_wl_class);
2943        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2944        count = st->count;
2945
2946        /*
2947         * the workload slice is computed as a fraction of target latency
2948         * proportional to the number of queues in that workload, over
2949         * all the queues in the same priority class
2950         */
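        /*
         * Hypothetical numbers: with a group slice of 300 jiffies, 2 queues
         * of the chosen workload type and 6 busy queues of that class in
         * the group (assuming the busy-queue average is not higher), the
         * slice below works out to 300 * 2 / 6 = 100 jiffies, before the
         * async scaling / sync minimum adjustments that follow.
         */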
2951        group_slice = cfq_group_slice(cfqd, cfqg);
2952
2953        slice = group_slice * count /
2954                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2955                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2956                                        cfqg));
2957
2958        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2959                unsigned int tmp;
2960
2961                /*
2962                 * Async queues are currently system wide. Just taking the
2963                 * proportion of queues within the same group would lead to a
2964                 * higher async ratio system wide, as the root group generally
2965                 * has the higher weight. A more accurate approach would be to
2966                 * calculate the system-wide async/sync ratio.
2967                 */
2968                tmp = cfqd->cfq_target_latency *
2969                        cfqg_busy_async_queues(cfqd, cfqg);
2970                tmp = tmp/cfqd->busy_queues;
2971                slice = min_t(unsigned, slice, tmp);
2972
2973                /* async workload slice is scaled down according to
2974                 * the sync/async slice ratio. */
2975                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2976        } else
2977                /* sync workload slice is at least 2 * cfq_slice_idle */
2978                slice = max(slice, 2 * cfqd->cfq_slice_idle);
2979
2980        slice = max_t(unsigned, slice, CFQ_MIN_TT);
2981        cfq_log(cfqd, "workload slice:%d", slice);
2982        cfqd->workload_expires = jiffies + slice;
2983}
2984
2985static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2986{
2987        struct cfq_rb_root *st = &cfqd->grp_service_tree;
2988        struct cfq_group *cfqg;
2989
2990        if (RB_EMPTY_ROOT(&st->rb))
2991                return NULL;
2992        cfqg = cfq_rb_first_group(st);
2993        update_min_vdisktime(st);
2994        return cfqg;
2995}
2996
2997static void cfq_choose_cfqg(struct cfq_data *cfqd)
2998{
2999        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3000
3001        cfqd->serving_group = cfqg;
3002
3003        /* Restore the workload type data */
3004        if (cfqg->saved_wl_slice) {
3005                cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3006                cfqd->serving_wl_type = cfqg->saved_wl_type;
3007                cfqd->serving_wl_class = cfqg->saved_wl_class;
3008        } else
3009                cfqd->workload_expires = jiffies - 1;
3010
3011        choose_wl_class_and_type(cfqd, cfqg);
3012}
3013
3014/*
3015 * Select a queue for service. If we have a current active queue,
3016 * check whether to continue servicing it, or retrieve and set a new one.
3017 */
3018static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3019{
3020        struct cfq_queue *cfqq, *new_cfqq = NULL;
3021
3022        cfqq = cfqd->active_queue;
3023        if (!cfqq)
3024                goto new_queue;
3025
3026        if (!cfqd->rq_queued)
3027                return NULL;
3028
3029        /*
3030         * We were waiting for group to get backlogged. Expire the queue
3031         */
3032        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3033                goto expire;
3034
3035        /*
3036         * The active queue has run out of time, expire it and select new.
3037         */
3038        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3039                /*
3040                 * If the slice had not expired at the completion of the last
3041                 * request we might not have turned on the wait_busy flag. Don't
3042                 * expire the queue yet. Allow the group to get backlogged.
3043                 *
3044                 * The very fact that we have used up the slice means we have
3045                 * been idling all along on this queue and it should be ok to
3046                 * wait for this request to complete.
3047                 */
3048                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3049                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3050                        cfqq = NULL;
3051                        goto keep_queue;
3052                } else
3053                        goto check_group_idle;
3054        }
3055
3056        /*
3057         * The active queue has requests and isn't expired, allow it to
3058         * dispatch.
3059         */
3060        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3061                goto keep_queue;
3062
3063        /*
3064         * If another queue has a request waiting within our mean seek
3065         * distance, let it run.  The expire code will check for close
3066         * cooperators and put the close queue at the front of the service
3067         * tree.  If possible, merge the expiring queue with the new cfqq.
3068         */
3069        new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3070        if (new_cfqq) {
3071                if (!cfqq->new_cfqq)
3072                        cfq_setup_merge(cfqq, new_cfqq);
3073                goto expire;
3074        }
3075
3076        /*
3077         * No requests pending. If the active queue still has requests in
3078         * flight or is idling for a new request, allow either of these
3079         * conditions to happen (or time out) before selecting a new queue.
3080         */
3081        if (timer_pending(&cfqd->idle_slice_timer)) {
3082                cfqq = NULL;
3083                goto keep_queue;
3084        }
3085
3086        /*
3087         * This is a deep seek queue, but the device is much faster than
3088         * the queue can deliver; don't idle.
3089         */
3090        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3091            (cfq_cfqq_slice_new(cfqq) ||
3092            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3093                cfq_clear_cfqq_deep(cfqq);
3094                cfq_clear_cfqq_idle_window(cfqq);
3095        }
3096
3097        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3098                cfqq = NULL;
3099                goto keep_queue;
3100        }
3101
3102        /*
3103         * If group idle is enabled and there are requests dispatched from
3104         * this group, wait for requests to complete.
3105         */
3106check_group_idle:
3107        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3108            cfqq->cfqg->dispatched &&
3109            !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3110                cfqq = NULL;
3111                goto keep_queue;
3112        }
3113
3114expire:
3115        cfq_slice_expired(cfqd, 0);
3116new_queue:
3117        /*
3118         * Current queue expired. Check if we have to switch to a new
3119         * service tree
3120         */
3121        if (!new_cfqq)
3122                cfq_choose_cfqg(cfqd);
3123
3124        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3125keep_queue:
3126        return cfqq;
3127}
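
/*
 * Expository note (added here, not part of the original source):
 * cfq_select_queue() above keeps the active queue whenever it still has
 * requests or is legitimately idling, and only falls through to expire and
 * cfq_choose_cfqg() when the slice is used up, a close cooperator should
 * take over, or neither queue idling nor group idling applies.  The
 * "keep_queue" label may therefore return NULL on purpose: it means
 * "dispatch nothing now, we are waiting for the current queue (or its
 * group) to produce another request".
 */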
3128
3129static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3130{
3131        int dispatched = 0;
3132
3133        while (cfqq->next_rq) {
3134                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3135                dispatched++;
3136        }
3137
3138        BUG_ON(!list_empty(&cfqq->fifo));
3139
3140        /* By default cfqq is not expired if it is empty. Do it explicitly */
3141        __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3142        return dispatched;
3143}
3144
3145/*
3146 * Drain our current requests. Used for barriers and when switching
3147 * io schedulers on-the-fly.
3148 */
3149static int cfq_forced_dispatch(struct cfq_data *cfqd)
3150{
3151        struct cfq_queue *cfqq;
3152        int dispatched = 0;
3153
3154        /* Expire the timeslice of the current active queue first */
3155        cfq_slice_expired(cfqd, 0);
3156        while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3157                __cfq_set_active_queue(cfqd, cfqq);
3158                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3159        }
3160
3161        BUG_ON(cfqd->busy_queues);
3162
3163        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3164        return dispatched;
3165}
3166
3167static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3168        struct cfq_queue *cfqq)
3169{
3170        /* the queue hasn't finished any request, can't estimate */
3171        if (cfq_cfqq_slice_new(cfqq))
3172                return true;
3173        if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3174                cfqq->slice_end))
3175                return true;
3176
3177        return false;
3178}
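
/*
 * Expository note (added here, not part of the original source): the
 * estimate above treats each already-dispatched request as costing roughly
 * one cfq_slice_idle of service.  For example, with the default
 * cfq_slice_idle = HZ / 125 (8 ms) and 4 requests in flight, the queue is
 * considered "used soon" if fewer than ~32 ms remain before slice_end.
 */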
3179
3180static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3181{
3182        unsigned int max_dispatch;
3183
3184        /*
3185         * Drain async requests before we start sync IO
3186         */
3187        if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3188                return false;
3189
3190        /*
3191         * If this is an async queue and we have sync IO in flight, let it wait
3192         */
3193        if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3194                return false;
3195
3196        max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3197        if (cfq_class_idle(cfqq))
3198                max_dispatch = 1;
3199
3200        /*
3201         * Does this cfqq already have too much IO in flight?
3202         */
3203        if (cfqq->dispatched >= max_dispatch) {
3204                bool promote_sync = false;
3205                /*
3206                 * idle queue must always only have a single IO in flight
3207                 */
3208                if (cfq_class_idle(cfqq))
3209                        return false;
3210
3211                /*
3212                 * If there is only one sync queue
3213                 * we can ignore the async queues here and give the sync
3214                 * queue no dispatch limit. The reason is that a sync queue can
3215                 * preempt an async queue, so limiting the sync queue doesn't
3216                 * make sense. This is useful for the aiostress test.
3217                 */
3218                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3219                        promote_sync = true;
3220
3221                /*
3222                 * We have other queues, don't allow more IO from this one
3223                 */
3224                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3225                                !promote_sync)
3226                        return false;
3227
3228                /*
3229                 * Sole queue user, no limit
3230                 */
3231                if (cfqd->busy_queues == 1 || promote_sync)
3232                        max_dispatch = -1;
3233                else
3234                        /*
3235                         * Normally we start throttling cfqq when cfq_quantum/2
3236                         * requests have been dispatched. But we can drive
3237                         * deeper queue depths at the beginning of slice
3238                         * subject to the upper limit of cfq_quantum.
3239                         */
3240                        max_dispatch = cfqd->cfq_quantum;
3241        }
3242
3243        /*
3244         * Async queues must wait a bit before being allowed dispatch.
3245         * We also ramp up the dispatch depth gradually for async IO,
3246         * based on the last sync IO we serviced
3247         */
3248        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3249                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3250                unsigned int depth;
3251
3252                depth = last_sync / cfqd->cfq_slice[1];
3253                if (!depth && !cfqq->dispatched)
3254                        depth = 1;
3255                if (depth < max_dispatch)
3256                        max_dispatch = depth;
3257        }
3258
3259        /*
3260         * If we're below the current max, allow a dispatch
3261         */
3262        return cfqq->dispatched < max_dispatch;
3263}
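
/*
 * Expository note (added here, not part of the original source): the async
 * ramp-up above grants an async queue a dispatch depth of one request per
 * sync slice (cfq_slice[1]) elapsed since the last "delayed" sync
 * completion.  With the default cfq_slice_sync = HZ / 10 (100 ms), an async
 * queue that would otherwise be allowed cfq_quantum / 2 requests only
 * reaches that depth once sync I/O has been quiet for a few hundred
 * milliseconds.
 */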
3264
3265/*
3266 * Dispatch a request from cfqq, moving it to the request queue
3267 * dispatch list.
3268 */
3269static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3270{
3271        struct request *rq;
3272
3273        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3274
3275        if (!cfq_may_dispatch(cfqd, cfqq))
3276                return false;
3277
3278        /*
3279         * follow expired path, else get first next available
3280         */
3281        rq = cfq_check_fifo(cfqq);
3282        if (!rq)
3283                rq = cfqq->next_rq;
3284
3285        /*
3286         * insert request into driver dispatch list
3287         */
3288        cfq_dispatch_insert(cfqd->queue, rq);
3289
3290        if (!cfqd->active_cic) {
3291                struct cfq_io_cq *cic = RQ_CIC(rq);
3292
3293                atomic_long_inc(&cic->icq.ioc->refcount);
3294                cfqd->active_cic = cic;
3295        }
3296
3297        return true;
3298}
3299
3300/*
3301 * Find the cfqq that we need to service and move a request from that to the
3302 * dispatch list
3303 */
3304static int cfq_dispatch_requests(struct request_queue *q, int force)
3305{
3306        struct cfq_data *cfqd = q->elevator->elevator_data;
3307        struct cfq_queue *cfqq;
3308
3309        if (!cfqd->busy_queues)
3310                return 0;
3311
3312        if (unlikely(force))
3313                return cfq_forced_dispatch(cfqd);
3314
3315        cfqq = cfq_select_queue(cfqd);
3316        if (!cfqq)
3317                return 0;
3318
3319        /*
3320         * Dispatch a request from this cfqq, if it is allowed
3321         */
3322        if (!cfq_dispatch_request(cfqd, cfqq))
3323                return 0;
3324
3325        cfqq->slice_dispatch++;
3326        cfq_clear_cfqq_must_dispatch(cfqq);
3327
3328        /*
3329         * Expire an async queue immediately if it has used up its slice. Idle
3330         * queues always expire after one dispatch round.
3331         */
3332        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3333            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3334            cfq_class_idle(cfqq))) {
3335                cfqq->slice_end = jiffies + 1;
3336                cfq_slice_expired(cfqd, 0);
3337        }
3338
3339        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3340        return 1;
3341}
3342
3343/*
3344 * task holds one reference to the queue, dropped when task exits. each rq
3345 * in-flight on this queue also holds a reference, dropped when rq is freed.
3346 *
3347 * Each cfq queue took a reference on the parent group. Drop it now.
3348 * queue lock must be held here.
3349 */
3350static void cfq_put_queue(struct cfq_queue *cfqq)
3351{
3352        struct cfq_data *cfqd = cfqq->cfqd;
3353        struct cfq_group *cfqg;
3354
3355        BUG_ON(cfqq->ref <= 0);
3356
3357        cfqq->ref--;
3358        if (cfqq->ref)
3359                return;
3360
3361        cfq_log_cfqq(cfqd, cfqq, "put_queue");
3362        BUG_ON(rb_first(&cfqq->sort_list));
3363        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3364        cfqg = cfqq->cfqg;
3365
3366        if (unlikely(cfqd->active_queue == cfqq)) {
3367                __cfq_slice_expired(cfqd, cfqq, 0);
3368                cfq_schedule_dispatch(cfqd);
3369        }
3370
3371        BUG_ON(cfq_cfqq_on_rr(cfqq));
3372        kmem_cache_free(cfq_pool, cfqq);
3373        cfqg_put(cfqg);
3374}
3375
3376static void cfq_put_cooperator(struct cfq_queue *cfqq)
3377{
3378        struct cfq_queue *__cfqq, *next;
3379
3380        /*
3381         * If this queue was scheduled to merge with another queue, be
3382         * sure to drop the reference taken on that queue (and others in
3383         * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3384         */
3385        __cfqq = cfqq->new_cfqq;
3386        while (__cfqq) {
3387                if (__cfqq == cfqq) {
3388                        WARN(1, "cfqq->new_cfqq loop detected\n");
3389                        break;
3390                }
3391                next = __cfqq->new_cfqq;
3392                cfq_put_queue(__cfqq);
3393                __cfqq = next;
3394        }
3395}
3396
3397static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3398{
3399        if (unlikely(cfqq == cfqd->active_queue)) {
3400                __cfq_slice_expired(cfqd, cfqq, 0);
3401                cfq_schedule_dispatch(cfqd);
3402        }
3403
3404        cfq_put_cooperator(cfqq);
3405
3406        cfq_put_queue(cfqq);
3407}
3408
3409static void cfq_init_icq(struct io_cq *icq)
3410{
3411        struct cfq_io_cq *cic = icq_to_cic(icq);
3412
3413        cic->ttime.last_end_request = jiffies;
3414}
3415
3416static void cfq_exit_icq(struct io_cq *icq)
3417{
3418        struct cfq_io_cq *cic = icq_to_cic(icq);
3419        struct cfq_data *cfqd = cic_to_cfqd(cic);
3420
3421        if (cic->cfqq[BLK_RW_ASYNC]) {
3422                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3423                cic->cfqq[BLK_RW_ASYNC] = NULL;
3424        }
3425
3426        if (cic->cfqq[BLK_RW_SYNC]) {
3427                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3428                cic->cfqq[BLK_RW_SYNC] = NULL;
3429        }
3430}
3431
3432static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3433{
3434        struct task_struct *tsk = current;
3435        int ioprio_class;
3436
3437        if (!cfq_cfqq_prio_changed(cfqq))
3438                return;
3439
3440        ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3441        switch (ioprio_class) {
3442        default:
3443                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3444        case IOPRIO_CLASS_NONE:
3445                /*
3446                 * no prio set, inherit CPU scheduling settings
3447                 */
3448                cfqq->ioprio = task_nice_ioprio(tsk);
3449                cfqq->ioprio_class = task_nice_ioclass(tsk);
3450                break;
3451        case IOPRIO_CLASS_RT:
3452                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3453                cfqq->ioprio_class = IOPRIO_CLASS_RT;
3454                break;
3455        case IOPRIO_CLASS_BE:
3456                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3457                cfqq->ioprio_class = IOPRIO_CLASS_BE;
3458                break;
3459        case IOPRIO_CLASS_IDLE:
3460                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3461                cfqq->ioprio = 7;
3462                cfq_clear_cfqq_idle_window(cfqq);
3463                break;
3464        }
3465
3466        /*
3467         * keep track of original prio settings in case we have to temporarily
3468         * elevate the priority of this queue
3469         */
3470        cfqq->org_ioprio = cfqq->ioprio;
3471        cfq_clear_cfqq_prio_changed(cfqq);
3472}
3473
3474static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3475{
3476        int ioprio = cic->icq.ioc->ioprio;
3477        struct cfq_data *cfqd = cic_to_cfqd(cic);
3478        struct cfq_queue *cfqq;
3479
3480        /*
3481         * Check whether ioprio has changed.  The condition may trigger
3482         * spuriously on a newly created cic but there's no harm.
3483         */
3484        if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3485                return;
3486
3487        cfqq = cic->cfqq[BLK_RW_ASYNC];
3488        if (cfqq) {
3489                struct cfq_queue *new_cfqq;
3490                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3491                                         GFP_ATOMIC);
3492                if (new_cfqq) {
3493                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3494                        cfq_put_queue(cfqq);
3495                }
3496        }
3497
3498        cfqq = cic->cfqq[BLK_RW_SYNC];
3499        if (cfqq)
3500                cfq_mark_cfqq_prio_changed(cfqq);
3501
3502        cic->ioprio = ioprio;
3503}
3504
3505static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3506                          pid_t pid, bool is_sync)
3507{
3508        RB_CLEAR_NODE(&cfqq->rb_node);
3509        RB_CLEAR_NODE(&cfqq->p_node);
3510        INIT_LIST_HEAD(&cfqq->fifo);
3511
3512        cfqq->ref = 0;
3513        cfqq->cfqd = cfqd;
3514
3515        cfq_mark_cfqq_prio_changed(cfqq);
3516
3517        if (is_sync) {
3518                if (!cfq_class_idle(cfqq))
3519                        cfq_mark_cfqq_idle_window(cfqq);
3520                cfq_mark_cfqq_sync(cfqq);
3521        }
3522        cfqq->pid = pid;
3523}
3524
3525#ifdef CONFIG_CFQ_GROUP_IOSCHED
3526static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3527{
3528        struct cfq_data *cfqd = cic_to_cfqd(cic);
3529        struct cfq_queue *sync_cfqq;
3530        uint64_t id;
3531
3532        rcu_read_lock();
3533        id = bio_blkcg(bio)->id;
3534        rcu_read_unlock();
3535
3536        /*
3537         * Check whether blkcg has changed.  The condition may trigger
3538         * spuriously on a newly created cic but there's no harm.
3539         */
3540        if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3541                return;
3542
3543        sync_cfqq = cic_to_cfqq(cic, 1);
3544        if (sync_cfqq) {
3545                /*
3546                 * Drop reference to sync queue. A new sync queue will be
3547                 * assigned in new group upon arrival of a fresh request.
3548                 */
3549                cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3550                cic_set_cfqq(cic, NULL, 1);
3551                cfq_put_queue(sync_cfqq);
3552        }
3553
3554        cic->blkcg_id = id;
3555}
3556#else
3557static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3558#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3559
3560static struct cfq_queue *
3561cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3562                     struct bio *bio, gfp_t gfp_mask)
3563{
3564        struct blkcg *blkcg;
3565        struct cfq_queue *cfqq, *new_cfqq = NULL;
3566        struct cfq_group *cfqg;
3567
3568retry:
3569        rcu_read_lock();
3570
3571        blkcg = bio_blkcg(bio);
3572        cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3573        cfqq = cic_to_cfqq(cic, is_sync);
3574
3575        /*
3576         * Always try a new alloc if we fell back to the OOM cfqq
3577         * originally, since it should just be a temporary situation.
3578         */
3579        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3580                cfqq = NULL;
3581                if (new_cfqq) {
3582                        cfqq = new_cfqq;
3583                        new_cfqq = NULL;
3584                } else if (gfp_mask & __GFP_WAIT) {
3585                        rcu_read_unlock();
3586                        spin_unlock_irq(cfqd->queue->queue_lock);
3587                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
3588                                        gfp_mask | __GFP_ZERO,
3589                                        cfqd->queue->node);
3590                        spin_lock_irq(cfqd->queue->queue_lock);
3591                        if (new_cfqq)
3592                                goto retry;
3593                        else
3594                                return &cfqd->oom_cfqq;
3595                } else {
3596                        cfqq = kmem_cache_alloc_node(cfq_pool,
3597                                        gfp_mask | __GFP_ZERO,
3598                                        cfqd->queue->node);
3599                }
3600
3601                if (cfqq) {
3602                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3603                        cfq_init_prio_data(cfqq, cic);
3604                        cfq_link_cfqq_cfqg(cfqq, cfqg);
3605                        cfq_log_cfqq(cfqd, cfqq, "alloced");
3606                } else
3607                        cfqq = &cfqd->oom_cfqq;
3608        }
3609
3610        if (new_cfqq)
3611                kmem_cache_free(cfq_pool, new_cfqq);
3612
3613        rcu_read_unlock();
3614        return cfqq;
3615}
3616
3617static struct cfq_queue **
3618cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3619{
3620        switch (ioprio_class) {
3621        case IOPRIO_CLASS_RT:
3622                return &cfqd->async_cfqq[0][ioprio];
3623        case IOPRIO_CLASS_NONE:
3624                ioprio = IOPRIO_NORM;
3625                /* fall through */
3626        case IOPRIO_CLASS_BE:
3627                return &cfqd->async_cfqq[1][ioprio];
3628        case IOPRIO_CLASS_IDLE:
3629                return &cfqd->async_idle_cfqq;
3630        default:
3631                BUG();
3632        }
3633}
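
/*
 * Expository note (added here, not part of the original source): async
 * queues are shared per device rather than per task.  cfqd->async_cfqq[0][]
 * holds the RT class queues, async_cfqq[1][] the BE class queues
 * (IOPRIO_CLASS_NONE is mapped to BE at IOPRIO_NORM), and a single
 * async_idle_cfqq serves the idle class, so at most 2 * IOPRIO_BE_NR + 1
 * async queues exist per device.  These are the queues torn down later by
 * cfq_put_async_queues().
 */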
3634
3635static struct cfq_queue *
3636cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3637              struct bio *bio, gfp_t gfp_mask)
3638{
3639        const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3640        const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3641        struct cfq_queue **async_cfqq = NULL;
3642        struct cfq_queue *cfqq = NULL;
3643
3644        if (!is_sync) {
3645                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3646                cfqq = *async_cfqq;
3647        }
3648
3649        if (!cfqq)
3650                cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3651
3652        /*
3653         * pin the queue now that it's allocated, scheduler exit will prune it
3654         */
3655        if (!is_sync && !(*async_cfqq)) {
3656                cfqq->ref++;
3657                *async_cfqq = cfqq;
3658        }
3659
3660        cfqq->ref++;
3661        return cfqq;
3662}
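
/*
 * Expository note (added here, not part of the original source):
 * cfq_get_queue() returns the queue with one reference taken for the
 * caller.  The first time an async queue is created it also gains a second,
 * long-lived reference held through *async_cfqq; that pin is what keeps the
 * shared async queues alive until cfq_put_async_queues() drops it at
 * elevator exit.
 */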
3663
3664static void
3665__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3666{
3667        unsigned long elapsed = jiffies - ttime->last_end_request;
3668        elapsed = min(elapsed, 2UL * slice_idle);
3669
3670        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3671        ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3672        ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3673}
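
/*
 * Expository note (added here, not part of the original source): the think
 * time tracking above is a fixed-point exponentially weighted moving
 * average.  Both ttime_samples and ttime_total are scaled by 256, and each
 * update keeps 7/8 of the old value.  Starting from zero, ttime_samples
 * goes 32, 60, 84 over three updates, so sample_valid() (which requires
 * > 80) starts trusting the mean after roughly three completed sync
 * requests.
 */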
3674
3675static void
3676cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3677                        struct cfq_io_cq *cic)
3678{
3679        if (cfq_cfqq_sync(cfqq)) {
3680                __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3681                __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3682                        cfqd->cfq_slice_idle);
3683        }
3684#ifdef CONFIG_CFQ_GROUP_IOSCHED
3685        __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3686#endif
3687}
3688
3689static void
3690cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3691                       struct request *rq)
3692{
3693        sector_t sdist = 0;
3694        sector_t n_sec = blk_rq_sectors(rq);
3695        if (cfqq->last_request_pos) {
3696                if (cfqq->last_request_pos < blk_rq_pos(rq))
3697                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3698                else
3699                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3700        }
3701
3702        cfqq->seek_history <<= 1;
3703        if (blk_queue_nonrot(cfqd->queue))
3704                cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3705        else
3706                cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3707}
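
/*
 * Expository note (added here, not part of the original source):
 * seek_history is a 32-bit shift register of per-request "seeky" bits.  On
 * rotational devices a bit is set when the seek distance exceeds
 * CFQQ_SEEK_THR (800 sectors); on non-rotational devices it is set for
 * small requests (< 64 sectors) instead, since seek distance is irrelevant
 * there.  CFQQ_SEEKY() then flags the queue once more than 4 of the last 32
 * requests were seeky.
 */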
3708
3709/*
3710 * Disable idle window if the process thinks too long or seeks so much that
3711 * it doesn't matter
3712 */
3713static void
3714cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3715                       struct cfq_io_cq *cic)
3716{
3717        int old_idle, enable_idle;
3718
3719        /*
3720         * Don't idle for async or idle io prio class
3721         */
3722        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3723                return;
3724
3725        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3726
3727        if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3728                cfq_mark_cfqq_deep(cfqq);
3729
3730        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3731                enable_idle = 0;
3732        else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3733                 !cfqd->cfq_slice_idle ||
3734                 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3735                enable_idle = 0;
3736        else if (sample_valid(cic->ttime.ttime_samples)) {
3737                if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3738                        enable_idle = 0;
3739                else
3740                        enable_idle = 1;
3741        }
3742
3743        if (old_idle != enable_idle) {
3744                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3745                if (enable_idle)
3746                        cfq_mark_cfqq_idle_window(cfqq);
3747                else
3748                        cfq_clear_cfqq_idle_window(cfqq);
3749        }
3750}
3751
3752/*
3753 * Check if new_cfqq should preempt the currently active queue. Returns false
3754 * for no (or if we aren't sure); returning true will cause a preempt.
3755 */
3756static bool
3757cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3758                   struct request *rq)
3759{
3760        struct cfq_queue *cfqq;
3761
3762        cfqq = cfqd->active_queue;
3763        if (!cfqq)
3764                return false;
3765
3766        if (cfq_class_idle(new_cfqq))
3767                return false;
3768
3769        if (cfq_class_idle(cfqq))
3770                return true;
3771
3772        /*
3773         * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3774         */
3775        if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3776                return false;
3777
3778        /*
3779         * if the new request is sync, but the currently running queue is
3780         * not, let the sync request have priority.
3781         */
3782        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3783                return true;
3784
3785        if (new_cfqq->cfqg != cfqq->cfqg)
3786                return false;
3787
3788        if (cfq_slice_used(cfqq))
3789                return true;
3790
3791        /* Allow preemption only if we are idling on sync-noidle tree */
3792        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3793            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3794            new_cfqq->service_tree->count == 2 &&
3795            RB_EMPTY_ROOT(&cfqq->sort_list))
3796                return true;
3797
3798        /*
3799         * So both queues are sync. Let the new request get disk time if
3800         * it's a metadata request and the current queue is doing regular IO.
3801         */
3802        if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3803                return true;
3804
3805        /*
3806         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3807         */
3808        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3809                return true;
3810
3811        /* The active queue is empty and should not be idling; allow preemption */
3812        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3813                return true;
3814
3815        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3816                return false;
3817
3818        /*
3819         * if this request is as-good as one we would expect from the
3820         * current cfqq, let it preempt
3821         */
3822        if (cfq_rq_close(cfqd, cfqq, rq))
3823                return true;
3824
3825        return false;
3826}
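
/*
 * Expository note (added here, not part of the original source): the checks
 * above are ordered roughly from "never preempt" to "always preempt":
 * idle-class requests never preempt, anything preempts an idle-class queue,
 * RT slices are protected from non-RT requests, sync beats async, and the
 * remaining rules (expired slice, sync-noidle siblings, REQ_PRIO metadata,
 * RT over BE, close requests) only apply within the same cgroup.
 */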
3827
3828/*
3829 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3830 * let it have half of its nominal slice.
3831 */
3832static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3833{
3834        enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3835
3836        cfq_log_cfqq(cfqd, cfqq, "preempt");
3837        cfq_slice_expired(cfqd, 1);
3838
3839        /*
3840         * the workload type has changed; don't save the slice, otherwise the
3841         * preempt doesn't take effect
3842         */
3843        if (old_type != cfqq_type(cfqq))
3844                cfqq->cfqg->saved_wl_slice = 0;
3845
3846        /*
3847         * Put the new queue at the front of the current list,
3848         * so we know that it will be selected next.
3849         */
3850        BUG_ON(!cfq_cfqq_on_rr(cfqq));
3851
3852        cfq_service_tree_add(cfqd, cfqq, 1);
3853
3854        cfqq->slice_end = 0;
3855        cfq_mark_cfqq_slice_new(cfqq);
3856}
3857
3858/*
3859 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3860 * something we should do about it
3861 */
3862static void
3863cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3864                struct request *rq)
3865{
3866        struct cfq_io_cq *cic = RQ_CIC(rq);
3867
3868        cfqd->rq_queued++;
3869        if (rq->cmd_flags & REQ_PRIO)
3870                cfqq->prio_pending++;
3871
3872        cfq_update_io_thinktime(cfqd, cfqq, cic);
3873        cfq_update_io_seektime(cfqd, cfqq, rq);
3874        cfq_update_idle_window(cfqd, cfqq, cic);
3875
3876        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3877
3878        if (cfqq == cfqd->active_queue) {
3879                /*
3880                 * Remember that we saw a request from this process, but
3881                 * don't start queuing just yet. Otherwise we risk seeing lots
3882                 * of tiny requests, because we disrupt the normal plugging
3883                 * and merging. If the request is already larger than a single
3884                 * page, let it rip immediately. For that case we assume that
3885                 * merging is already done. Ditto for a busy system that
3886                 * has other work pending, don't risk delaying until the
3887                 * idle timer unplug to continue working.
3888                 */
3889                if (cfq_cfqq_wait_request(cfqq)) {
3890                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3891                            cfqd->busy_queues > 1) {
3892                                cfq_del_timer(cfqd, cfqq);
3893                                cfq_clear_cfqq_wait_request(cfqq);
3894                                __blk_run_queue(cfqd->queue);
3895                        } else {
3896                                cfqg_stats_update_idle_time(cfqq->cfqg);
3897                                cfq_mark_cfqq_must_dispatch(cfqq);
3898                        }
3899                }
3900        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3901                /*
3902                 * not the active queue - expire current slice if it is
3903                 * idle and has expired its mean thinktime or this new queue
3904                 * has some old slice time left and is of higher priority or
3905                 * this new queue is RT and the current one is BE
3906                 */
3907                cfq_preempt_queue(cfqd, cfqq);
3908                __blk_run_queue(cfqd->queue);
3909        }
3910}
3911
3912static void cfq_insert_request(struct request_queue *q, struct request *rq)
3913{
3914        struct cfq_data *cfqd = q->elevator->elevator_data;
3915        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3916
3917        cfq_log_cfqq(cfqd, cfqq, "insert_request");
3918        cfq_init_prio_data(cfqq, RQ_CIC(rq));
3919
3920        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3921        list_add_tail(&rq->queuelist, &cfqq->fifo);
3922        cfq_add_rq_rb(rq);
3923        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3924                                 rq->cmd_flags);
3925        cfq_rq_enqueued(cfqd, cfqq, rq);
3926}
3927
3928/*
3929 * Update hw_tag based on peak queue depth over 50 samples under
3930 * sufficient load.
3931 */
3932static void cfq_update_hw_tag(struct cfq_data *cfqd)
3933{
3934        struct cfq_queue *cfqq = cfqd->active_queue;
3935
3936        if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3937                cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3938
3939        if (cfqd->hw_tag == 1)
3940                return;
3941
3942        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3943            cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3944                return;
3945
3946        /*
3947         * If the active queue doesn't have enough requests and can idle, cfq
3948         * might not dispatch sufficient requests to the hardware. Don't zero
3949         * hw_tag in this case.
3950         */
3951        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3952            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3953            CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3954                return;
3955
3956        if (cfqd->hw_tag_samples++ < 50)
3957                return;
3958
3959        if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3960                cfqd->hw_tag = 1;
3961        else
3962                cfqd->hw_tag = 0;
3963}
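
/*
 * Expository note (added here, not part of the original source): hw_tag
 * starts at -1 ("unknown", set in cfq_init_queue) and is only decided after
 * 50 samples taken under sufficient load.  A peak driver depth of at least
 * CFQ_HW_QUEUE_MIN (5) is taken as evidence of command-queuing hardware,
 * which other parts of the scheduler use to decide how aggressively to
 * idle.
 */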
3964
3965static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3966{
3967        struct cfq_io_cq *cic = cfqd->active_cic;
3968
3969        /* If the queue already has requests, don't wait */
3970        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3971                return false;
3972
3973        /* If there are other queues in the group, don't wait */
3974        if (cfqq->cfqg->nr_cfqq > 1)
3975                return false;
3976
3977        /* the only queue in the group, but think time is big */
3978        if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3979                return false;
3980
3981        if (cfq_slice_used(cfqq))
3982                return true;
3983
3984        /* if slice left is less than think time, wait busy */
3985        if (cic && sample_valid(cic->ttime.ttime_samples)
3986            && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3987                return true;
3988
3989        /*
3990         * If the think time is less than a jiffy then ttime_mean=0 and the
3991         * above will not be true. It might happen that the slice has not
3992         * expired yet but will expire soon (4-5 ns) during select_queue(). To
3993         * cover the case where the think time is less than a jiffy, mark the
3994         * queue wait busy if only 1 jiffy is left in the slice.
3995         */
3996        if (cfqq->slice_end - jiffies == 1)
3997                return true;
3998
3999        return false;
4000}
4001
4002static void cfq_completed_request(struct request_queue *q, struct request *rq)
4003{
4004        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4005        struct cfq_data *cfqd = cfqq->cfqd;
4006        const int sync = rq_is_sync(rq);
4007        unsigned long now;
4008
4009        now = jiffies;
4010        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4011                     !!(rq->cmd_flags & REQ_NOIDLE));
4012
4013        cfq_update_hw_tag(cfqd);
4014
4015        WARN_ON(!cfqd->rq_in_driver);
4016        WARN_ON(!cfqq->dispatched);
4017        cfqd->rq_in_driver--;
4018        cfqq->dispatched--;
4019        (RQ_CFQG(rq))->dispatched--;
4020        cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4021                                     rq_io_start_time_ns(rq), rq->cmd_flags);
4022
4023        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4024
4025        if (sync) {
4026                struct cfq_rb_root *st;
4027
4028                RQ_CIC(rq)->ttime.last_end_request = now;
4029
4030                if (cfq_cfqq_on_rr(cfqq))
4031                        st = cfqq->service_tree;
4032                else
4033                        st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4034                                        cfqq_type(cfqq));
4035
4036                st->ttime.last_end_request = now;
4037                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4038                        cfqd->last_delayed_sync = now;
4039        }
4040
4041#ifdef CONFIG_CFQ_GROUP_IOSCHED
4042        cfqq->cfqg->ttime.last_end_request = now;
4043#endif
4044
4045        /*
4046         * If this is the active queue, check if it needs to be expired,
4047         * or if we want to idle in case it has no pending requests.
4048         */
4049        if (cfqd->active_queue == cfqq) {
4050                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4051
4052                if (cfq_cfqq_slice_new(cfqq)) {
4053                        cfq_set_prio_slice(cfqd, cfqq);
4054                        cfq_clear_cfqq_slice_new(cfqq);
4055                }
4056
4057                /*
4058                 * Should we wait for the next request to come in before we
4059                 * expire the queue?
4060                 */
4061                if (cfq_should_wait_busy(cfqd, cfqq)) {
4062                        unsigned long extend_sl = cfqd->cfq_slice_idle;
4063                        if (!cfqd->cfq_slice_idle)
4064                                extend_sl = cfqd->cfq_group_idle;
4065                        cfqq->slice_end = jiffies + extend_sl;
4066                        cfq_mark_cfqq_wait_busy(cfqq);
4067                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4068                }
4069
4070                /*
4071                 * Idling is not enabled on:
4072                 * - expired queues
4073                 * - idle-priority queues
4074                 * - async queues
4075                 * - queues with still some requests queued
4076                 * - when there is a close cooperator
4077                 */
4078                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4079                        cfq_slice_expired(cfqd, 1);
4080                else if (sync && cfqq_empty &&
4081                         !cfq_close_cooperator(cfqd, cfqq)) {
4082                        cfq_arm_slice_timer(cfqd);
4083                }
4084        }
4085
4086        if (!cfqd->rq_in_driver)
4087                cfq_schedule_dispatch(cfqd);
4088}
4089
4090static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4091{
4092        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4093                cfq_mark_cfqq_must_alloc_slice(cfqq);
4094                return ELV_MQUEUE_MUST;
4095        }
4096
4097        return ELV_MQUEUE_MAY;
4098}
4099
4100static int cfq_may_queue(struct request_queue *q, int rw)
4101{
4102        struct cfq_data *cfqd = q->elevator->elevator_data;
4103        struct task_struct *tsk = current;
4104        struct cfq_io_cq *cic;
4105        struct cfq_queue *cfqq;
4106
4107        /*
4108         * don't force setup of a queue from here, as a call to may_queue
4109         * does not necessarily imply that a request actually will be queued.
4110         * so just lookup a possibly existing queue, or return 'may queue'
4111         * if that fails
4112         */
4113        cic = cfq_cic_lookup(cfqd, tsk->io_context);
4114        if (!cic)
4115                return ELV_MQUEUE_MAY;
4116
4117        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4118        if (cfqq) {
4119                cfq_init_prio_data(cfqq, cic);
4120
4121                return __cfq_may_queue(cfqq);
4122        }
4123
4124        return ELV_MQUEUE_MAY;
4125}
4126
4127/*
4128 * queue lock held here
4129 */
4130static void cfq_put_request(struct request *rq)
4131{
4132        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4133
4134        if (cfqq) {
4135                const int rw = rq_data_dir(rq);
4136
4137                BUG_ON(!cfqq->allocated[rw]);
4138                cfqq->allocated[rw]--;
4139
4140                /* Put down rq reference on cfqg */
4141                cfqg_put(RQ_CFQG(rq));
4142                rq->elv.priv[0] = NULL;
4143                rq->elv.priv[1] = NULL;
4144
4145                cfq_put_queue(cfqq);
4146        }
4147}
4148
4149static struct cfq_queue *
4150cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4151                struct cfq_queue *cfqq)
4152{
4153        cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4154        cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4155        cfq_mark_cfqq_coop(cfqq->new_cfqq);
4156        cfq_put_queue(cfqq);
4157        return cic_to_cfqq(cic, 1);
4158}
4159
4160/*
4161 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4162 * was the last process referring to said cfqq.
4163 */
4164static struct cfq_queue *
4165split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4166{
4167        if (cfqq_process_refs(cfqq) == 1) {
4168                cfqq->pid = current->pid;
4169                cfq_clear_cfqq_coop(cfqq);
4170                cfq_clear_cfqq_split_coop(cfqq);
4171                return cfqq;
4172        }
4173
4174        cic_set_cfqq(cic, NULL, 1);
4175
4176        cfq_put_cooperator(cfqq);
4177
4178        cfq_put_queue(cfqq);
4179        return NULL;
4180}
4181/*
4182 * Allocate cfq data structures associated with this request.
4183 */
4184static int
4185cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4186                gfp_t gfp_mask)
4187{
4188        struct cfq_data *cfqd = q->elevator->elevator_data;
4189        struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4190        const int rw = rq_data_dir(rq);
4191        const bool is_sync = rq_is_sync(rq);
4192        struct cfq_queue *cfqq;
4193
4194        might_sleep_if(gfp_mask & __GFP_WAIT);
4195
4196        spin_lock_irq(q->queue_lock);
4197
4198        check_ioprio_changed(cic, bio);
4199        check_blkcg_changed(cic, bio);
4200new_queue:
4201        cfqq = cic_to_cfqq(cic, is_sync);
4202        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4203                cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
4204                cic_set_cfqq(cic, cfqq, is_sync);
4205        } else {
4206                /*
4207                 * If the queue was seeky for too long, break it apart.
4208                 */
4209                if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4210                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4211                        cfqq = split_cfqq(cic, cfqq);
4212                        if (!cfqq)
4213                                goto new_queue;
4214                }
4215
4216                /*
4217                 * Check to see if this queue is scheduled to merge with
4218                 * another, closely cooperating queue.  The merging of
4219                 * queues happens here as it must be done in process context.
4220                 * The reference on new_cfqq was taken in merge_cfqqs.
4221                 */
4222                if (cfqq->new_cfqq)
4223                        cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4224        }
4225
4226        cfqq->allocated[rw]++;
4227
4228        cfqq->ref++;
4229        cfqg_get(cfqq->cfqg);
4230        rq->elv.priv[0] = cfqq;
4231        rq->elv.priv[1] = cfqq->cfqg;
4232        spin_unlock_irq(q->queue_lock);
4233        return 0;
4234}
4235
4236static void cfq_kick_queue(struct work_struct *work)
4237{
4238        struct cfq_data *cfqd =
4239                container_of(work, struct cfq_data, unplug_work);
4240        struct request_queue *q = cfqd->queue;
4241
4242        spin_lock_irq(q->queue_lock);
4243        __blk_run_queue(cfqd->queue);
4244        spin_unlock_irq(q->queue_lock);
4245}
4246
4247/*
4248 * Timer running if the active_queue is currently idling inside its time slice
4249 */
4250static void cfq_idle_slice_timer(unsigned long data)
4251{
4252        struct cfq_data *cfqd = (struct cfq_data *) data;
4253        struct cfq_queue *cfqq;
4254        unsigned long flags;
4255        int timed_out = 1;
4256
4257        cfq_log(cfqd, "idle timer fired");
4258
4259        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4260
4261        cfqq = cfqd->active_queue;
4262        if (cfqq) {
4263                timed_out = 0;
4264
4265                /*
4266                 * We saw a request before the queue expired, let it through
4267                 */
4268                if (cfq_cfqq_must_dispatch(cfqq))
4269                        goto out_kick;
4270
4271                /*
4272                 * expired
4273                 */
4274                if (cfq_slice_used(cfqq))
4275                        goto expire;
4276
4277                /*
4278                 * only expire and reinvoke request handler, if there are
4279                 * other queues with pending requests
4280                 */
4281                if (!cfqd->busy_queues)
4282                        goto out_cont;
4283
4284                /*
4285                 * not expired and it has a request pending, let it dispatch
4286                 */
4287                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4288                        goto out_kick;
4289
4290                /*
4291                 * Queue depth flag is reset only when the idle didn't succeed
4292                 */
4293                cfq_clear_cfqq_deep(cfqq);
4294        }
4295expire:
4296        cfq_slice_expired(cfqd, timed_out);
4297out_kick:
4298        cfq_schedule_dispatch(cfqd);
4299out_cont:
4300        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4301}
4302
4303static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4304{
4305        del_timer_sync(&cfqd->idle_slice_timer);
4306        cancel_work_sync(&cfqd->unplug_work);
4307}
4308
4309static void cfq_put_async_queues(struct cfq_data *cfqd)
4310{
4311        int i;
4312
4313        for (i = 0; i < IOPRIO_BE_NR; i++) {
4314                if (cfqd->async_cfqq[0][i])
4315                        cfq_put_queue(cfqd->async_cfqq[0][i]);
4316                if (cfqd->async_cfqq[1][i])
4317                        cfq_put_queue(cfqd->async_cfqq[1][i]);
4318        }
4319
4320        if (cfqd->async_idle_cfqq)
4321                cfq_put_queue(cfqd->async_idle_cfqq);
4322}
4323
4324static void cfq_exit_queue(struct elevator_queue *e)
4325{
4326        struct cfq_data *cfqd = e->elevator_data;
4327        struct request_queue *q = cfqd->queue;
4328
4329        cfq_shutdown_timer_wq(cfqd);
4330
4331        spin_lock_irq(q->queue_lock);
4332
4333        if (cfqd->active_queue)
4334                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4335
4336        cfq_put_async_queues(cfqd);
4337
4338        spin_unlock_irq(q->queue_lock);
4339
4340        cfq_shutdown_timer_wq(cfqd);
4341
4342#ifdef CONFIG_CFQ_GROUP_IOSCHED
4343        blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4344#else
4345        kfree(cfqd->root_group);
4346#endif
4347        kfree(cfqd);
4348}
4349
4350static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4351{
4352        struct cfq_data *cfqd;
4353        struct blkcg_gq *blkg __maybe_unused;
4354        int i, ret;
4355        struct elevator_queue *eq;
4356
4357        eq = elevator_alloc(q, e);
4358        if (!eq)
4359                return -ENOMEM;
4360
4361        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
4362        if (!cfqd) {
4363                kobject_put(&eq->kobj);
4364                return -ENOMEM;
4365        }
4366        eq->elevator_data = cfqd;
4367
4368        cfqd->queue = q;
4369        spin_lock_irq(q->queue_lock);
4370        q->elevator = eq;
4371        spin_unlock_irq(q->queue_lock);
4372
4373        /* Init root service tree */
4374        cfqd->grp_service_tree = CFQ_RB_ROOT;
4375
4376        /* Init root group and prefer root group over other groups by default */
4377#ifdef CONFIG_CFQ_GROUP_IOSCHED
4378        ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4379        if (ret)
4380                goto out_free;
4381
4382        cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4383#else
4384        ret = -ENOMEM;
4385        cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4386                                        GFP_KERNEL, cfqd->queue->node);
4387        if (!cfqd->root_group)
4388                goto out_free;
4389
4390        cfq_init_cfqg_base(cfqd->root_group);
4391#endif
4392        cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4393        cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4394
4395        /*
4396         * Not strictly needed (since RB_ROOT just clears the node and we
4397         * zeroed cfqd on alloc), but better be safe in case someone decides
4398         * to add magic to the rb code
4399         */
4400        for (i = 0; i < CFQ_PRIO_LISTS; i++)
4401                cfqd->prio_trees[i] = RB_ROOT;
4402
4403        /*
4404         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4405         * Grab a permanent reference to it, so that the normal code flow
4406         * will not attempt to free it.  oom_cfqq is linked to root_group
4407         * but shouldn't hold a reference as it'll never be unlinked.  Lose
4408         * the reference from linking right away.
4409         */
4410        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4411        cfqd->oom_cfqq.ref++;
4412
4413        spin_lock_irq(q->queue_lock);
4414        cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4415        cfqg_put(cfqd->root_group);
4416        spin_unlock_irq(q->queue_lock);
4417
4418        init_timer(&cfqd->idle_slice_timer);
4419        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4420        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4421
4422        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4423
4424        cfqd->cfq_quantum = cfq_quantum;
4425        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4426        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4427        cfqd->cfq_back_max = cfq_back_max;
4428        cfqd->cfq_back_penalty = cfq_back_penalty;
4429        cfqd->cfq_slice[0] = cfq_slice_async;
4430        cfqd->cfq_slice[1] = cfq_slice_sync;
4431        cfqd->cfq_target_latency = cfq_target_latency;
4432        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4433        cfqd->cfq_slice_idle = cfq_slice_idle;
4434        cfqd->cfq_group_idle = cfq_group_idle;
4435        cfqd->cfq_latency = 1;
4436        cfqd->hw_tag = -1;
4437        /*
4438         * we optimistically start assuming sync ops weren't delayed in last
4439         * second, in order to have larger depth for async operations.
4440         */
4441        cfqd->last_delayed_sync = jiffies - HZ;
4442        return 0;
4443
4444out_free:
4445        kfree(cfqd);
4446        kobject_put(&eq->kobj);
4447        return ret;
4448}
4449
4450/*
4451 * sysfs parts below -->
4452 */
4453static ssize_t
4454cfq_var_show(unsigned int var, char *page)
4455{
4456        return sprintf(page, "%d\n", var);
4457}
4458
4459static ssize_t
4460cfq_var_store(unsigned int *var, const char *page