linux/block/cfq-iosched.c
<<
>>
Prefs
   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/blkdev.h>
  12#include <linux/elevator.h>
  13#include <linux/jiffies.h>
  14#include <linux/rbtree.h>
  15#include <linux/ioprio.h>
  16#include <linux/blktrace_api.h>
  17#include "blk.h"
  18#include "blk-cgroup.h"
  19
  20/*
  21 * tunables
  22 */
  23/* max queue in one round of service */
  24static const int cfq_quantum = 8;
  25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  26/* maximum backwards seek, in KiB */
  27static const int cfq_back_max = 16 * 1024;
  28/* penalty of a backwards seek */
  29static const int cfq_back_penalty = 2;
  30static const int cfq_slice_sync = HZ / 10;
  31static int cfq_slice_async = HZ / 25;
  32static const int cfq_slice_async_rq = 2;
  33static int cfq_slice_idle = HZ / 125;
  34static int cfq_group_idle = HZ / 125;
  35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  36static const int cfq_hist_divisor = 4;
  37
  38/*
  39 * offset from end of service tree
  40 */
  41#define CFQ_IDLE_DELAY          (HZ / 5)
  42
  43/*
  44 * below this threshold, we consider thinktime immediate
  45 */
  46#define CFQ_MIN_TT              (2)
  47
  48#define CFQ_SLICE_SCALE         (5)
  49#define CFQ_HW_QUEUE_MIN        (5)
  50#define CFQ_SERVICE_SHIFT       12
  51
  52#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
  53#define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
  54#define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
  55#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
  56
  57#define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
  58#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
  59#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
  60
  61static struct kmem_cache *cfq_pool;
  62
  63#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
  64#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  65#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  66
  67#define sample_valid(samples)   ((samples) > 80)
  68#define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
  69
  70struct cfq_ttime {
  71        unsigned long last_end_request;
  72
  73        unsigned long ttime_total;
  74        unsigned long ttime_samples;
  75        unsigned long ttime_mean;
  76};
  77
  78/*
  79 * Most of our rbtree usage is for sorting with min extraction, so
  80 * if we cache the leftmost node we don't have to walk down the tree
  81 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
  82 * move this into the elevator for the rq sorting as well.
  83 */
  84struct cfq_rb_root {
  85        struct rb_root rb;
  86        struct rb_node *left;
  87        unsigned count;
  88        u64 min_vdisktime;
  89        struct cfq_ttime ttime;
  90};
  91#define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
  92                        .ttime = {.last_end_request = jiffies,},}
  93
  94/*
  95 * Per process-grouping structure
  96 */
  97struct cfq_queue {
  98        /* reference count */
  99        int ref;
 100        /* various state flags, see below */
 101        unsigned int flags;
 102        /* parent cfq_data */
 103        struct cfq_data *cfqd;
 104        /* service_tree member */
 105        struct rb_node rb_node;
 106        /* service_tree key */
 107        unsigned long rb_key;
 108        /* prio tree member */
 109        struct rb_node p_node;
 110        /* prio tree root we belong to, if any */
 111        struct rb_root *p_root;
 112        /* sorted list of pending requests */
 113        struct rb_root sort_list;
 114        /* if fifo isn't expired, next request to serve */
 115        struct request *next_rq;
 116        /* requests queued in sort_list */
 117        int queued[2];
 118        /* currently allocated requests */
 119        int allocated[2];
 120        /* fifo list of requests in sort_list */
 121        struct list_head fifo;
 122
 123        /* time when queue got scheduled in to dispatch first request. */
 124        unsigned long dispatch_start;
 125        unsigned int allocated_slice;
 126        unsigned int slice_dispatch;
 127        /* time when first request from queue completed and slice started. */
 128        unsigned long slice_start;
 129        unsigned long slice_end;
 130        long slice_resid;
 131
 132        /* pending priority requests */
 133        int prio_pending;
 134        /* number of requests that are on the dispatch list or inside driver */
 135        int dispatched;
 136
 137        /* io prio of this group */
 138        unsigned short ioprio, org_ioprio;
 139        unsigned short ioprio_class;
 140
 141        pid_t pid;
 142
 143        u32 seek_history;
 144        sector_t last_request_pos;
 145
 146        struct cfq_rb_root *service_tree;
 147        struct cfq_queue *new_cfqq;
 148        struct cfq_group *cfqg;
 149        /* Number of sectors dispatched from queue in single dispatch round */
 150        unsigned long nr_sectors;
 151};
 152
 153/*
 154 * First index in the service_trees.
 155 * IDLE is handled separately, so it has negative index
 156 */
 157enum wl_class_t {
 158        BE_WORKLOAD = 0,
 159        RT_WORKLOAD = 1,
 160        IDLE_WORKLOAD = 2,
 161        CFQ_PRIO_NR,
 162};
 163
 164/*
 165 * Second index in the service_trees.
 166 */
 167enum wl_type_t {
 168        ASYNC_WORKLOAD = 0,
 169        SYNC_NOIDLE_WORKLOAD = 1,
 170        SYNC_WORKLOAD = 2
 171};
 172
 173struct cfqg_stats {
 174#ifdef CONFIG_CFQ_GROUP_IOSCHED
 175        /* total bytes transferred */
 176        struct blkg_rwstat              service_bytes;
 177        /* total IOs serviced, post merge */
 178        struct blkg_rwstat              serviced;
 179        /* number of ios merged */
 180        struct blkg_rwstat              merged;
 181        /* total time spent on device in ns, may not be accurate w/ queueing */
 182        struct blkg_rwstat              service_time;
 183        /* total time spent waiting in scheduler queue in ns */
 184        struct blkg_rwstat              wait_time;
 185        /* number of IOs queued up */
 186        struct blkg_rwstat              queued;
 187        /* total sectors transferred */
 188        struct blkg_stat                sectors;
 189        /* total disk time and nr sectors dispatched by this group */
 190        struct blkg_stat                time;
 191#ifdef CONFIG_DEBUG_BLK_CGROUP
 192        /* time not charged to this cgroup */
 193        struct blkg_stat                unaccounted_time;
 194        /* sum of number of ios queued across all samples */
 195        struct blkg_stat                avg_queue_size_sum;
 196        /* count of samples taken for average */
 197        struct blkg_stat                avg_queue_size_samples;
 198        /* how many times this group has been removed from service tree */
 199        struct blkg_stat                dequeue;
 200        /* total time spent waiting for it to be assigned a timeslice. */
 201        struct blkg_stat                group_wait_time;
 202        /* time spent idling for this blkcg_gq */
 203        struct blkg_stat                idle_time;
 204        /* total time with empty current active q with other requests queued */
 205        struct blkg_stat                empty_time;
 206        /* fields after this shouldn't be cleared on stat reset */
 207        uint64_t                        start_group_wait_time;
 208        uint64_t                        start_idle_time;
 209        uint64_t                        start_empty_time;
 210        uint16_t                        flags;
 211#endif  /* CONFIG_DEBUG_BLK_CGROUP */
 212#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 213};
 214
 215/* This is per cgroup per device grouping structure */
 216struct cfq_group {
 217        /* must be the first member */
 218        struct blkg_policy_data pd;
 219
 220        /* group service_tree member */
 221        struct rb_node rb_node;
 222
 223        /* group service_tree key */
 224        u64 vdisktime;
 225
 226        /*
 227         * The number of active cfqgs and sum of their weights under this
 228         * cfqg.  This covers this cfqg's leaf_weight and all children's
 229         * weights, but does not cover weights of further descendants.
 230         *
 231         * If a cfqg is on the service tree, it's active.  An active cfqg
 232         * also activates its parent and contributes to the children_weight
 233         * of the parent.
 234         */
 235        int nr_active;
 236        unsigned int children_weight;
 237
 238        /*
 239         * vfraction is the fraction of vdisktime that the tasks in this
 240         * cfqg are entitled to.  This is determined by compounding the
 241         * ratios walking up from this cfqg to the root.
 242         *
 243         * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
 244         * vfractions on a service tree is approximately 1.  The sum may
 245         * deviate a bit due to rounding errors and fluctuations caused by
 246         * cfqgs entering and leaving the service tree.
 247         */
 248        unsigned int vfraction;
 249
 250        /*
 251         * There are two weights - (internal) weight is the weight of this
 252         * cfqg against the sibling cfqgs.  leaf_weight is the wight of
 253         * this cfqg against the child cfqgs.  For the root cfqg, both
 254         * weights are kept in sync for backward compatibility.
 255         */
 256        unsigned int weight;
 257        unsigned int new_weight;
 258        unsigned int dev_weight;
 259
 260        unsigned int leaf_weight;
 261        unsigned int new_leaf_weight;
 262        unsigned int dev_leaf_weight;
 263
 264        /* number of cfqq currently on this group */
 265        int nr_cfqq;
 266
 267        /*
 268         * Per group busy queues average. Useful for workload slice calc. We
 269         * create the array for each prio class but at run time it is used
 270         * only for RT and BE class and slot for IDLE class remains unused.
 271         * This is primarily done to avoid confusion and a gcc warning.
 272         */
 273        unsigned int busy_queues_avg[CFQ_PRIO_NR];
 274        /*
 275         * rr lists of queues with requests. We maintain service trees for
 276         * RT and BE classes. These trees are subdivided in subclasses
 277         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 278         * class there is no subclassification and all the cfq queues go on
 279         * a single tree service_tree_idle.
 280         * Counts are embedded in the cfq_rb_root
 281         */
 282        struct cfq_rb_root service_trees[2][3];
 283        struct cfq_rb_root service_tree_idle;
 284
 285        unsigned long saved_wl_slice;
 286        enum wl_type_t saved_wl_type;
 287        enum wl_class_t saved_wl_class;
 288
 289        /* number of requests that are on the dispatch list or inside driver */
 290        int dispatched;
 291        struct cfq_ttime ttime;
 292        struct cfqg_stats stats;        /* stats for this cfqg */
 293        struct cfqg_stats dead_stats;   /* stats pushed from dead children */
 294};
 295
 296struct cfq_io_cq {
 297        struct io_cq            icq;            /* must be the first member */
 298        struct cfq_queue        *cfqq[2];
 299        struct cfq_ttime        ttime;
 300        int                     ioprio;         /* the current ioprio */
 301#ifdef CONFIG_CFQ_GROUP_IOSCHED
 302        uint64_t                blkcg_id;       /* the current blkcg ID */
 303#endif
 304};
 305
 306/*
 307 * Per block device queue structure
 308 */
 309struct cfq_data {
 310        struct request_queue *queue;
 311        /* Root service tree for cfq_groups */
 312        struct cfq_rb_root grp_service_tree;
 313        struct cfq_group *root_group;
 314
 315        /*
 316         * The priority currently being served
 317         */
 318        enum wl_class_t serving_wl_class;
 319        enum wl_type_t serving_wl_type;
 320        unsigned long workload_expires;
 321        struct cfq_group *serving_group;
 322
 323        /*
 324         * Each priority tree is sorted by next_request position.  These
 325         * trees are used when determining if two or more queues are
 326         * interleaving requests (see cfq_close_cooperator).
 327         */
 328        struct rb_root prio_trees[CFQ_PRIO_LISTS];
 329
 330        unsigned int busy_queues;
 331        unsigned int busy_sync_queues;
 332
 333        int rq_in_driver;
 334        int rq_in_flight[2];
 335
 336        /*
 337         * queue-depth detection
 338         */
 339        int rq_queued;
 340        int hw_tag;
 341        /*
 342         * hw_tag can be
 343         * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
 344         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 345         *  0 => no NCQ
 346         */
 347        int hw_tag_est_depth;
 348        unsigned int hw_tag_samples;
 349
 350        /*
 351         * idle window management
 352         */
 353        struct timer_list idle_slice_timer;
 354        struct work_struct unplug_work;
 355
 356        struct cfq_queue *active_queue;
 357        struct cfq_io_cq *active_cic;
 358
 359        /*
 360         * async queue for each priority case
 361         */
 362        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 363        struct cfq_queue *async_idle_cfqq;
 364
 365        sector_t last_position;
 366
 367        /*
 368         * tunables, see top of file
 369         */
 370        unsigned int cfq_quantum;
 371        unsigned int cfq_fifo_expire[2];
 372        unsigned int cfq_back_penalty;
 373        unsigned int cfq_back_max;
 374        unsigned int cfq_slice[2];
 375        unsigned int cfq_slice_async_rq;
 376        unsigned int cfq_slice_idle;
 377        unsigned int cfq_group_idle;
 378        unsigned int cfq_latency;
 379        unsigned int cfq_target_latency;
 380
 381        /*
 382         * Fallback dummy cfqq for extreme OOM conditions
 383         */
 384        struct cfq_queue oom_cfqq;
 385
 386        unsigned long last_delayed_sync;
 387};
 388
 389static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 390
 391static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 392                                            enum wl_class_t class,
 393                                            enum wl_type_t type)
 394{
 395        if (!cfqg)
 396                return NULL;
 397
 398        if (class == IDLE_WORKLOAD)
 399                return &cfqg->service_tree_idle;
 400
 401        return &cfqg->service_trees[class][type];
 402}
 403
 404enum cfqq_state_flags {
 405        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
 406        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
 407        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
 408        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
 409        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
 410        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
 411        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
 412        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
 413        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 414        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
 415        CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be splitted */
 416        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
 417        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
 418};
 419
 420#define CFQ_CFQQ_FNS(name)                                              \
 421static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
 422{                                                                       \
 423        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
 424}                                                                       \
 425static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
 426{                                                                       \
 427        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
 428}                                                                       \
 429static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
 430{                                                                       \
 431        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
 432}
 433
 434CFQ_CFQQ_FNS(on_rr);
 435CFQ_CFQQ_FNS(wait_request);
 436CFQ_CFQQ_FNS(must_dispatch);
 437CFQ_CFQQ_FNS(must_alloc_slice);
 438CFQ_CFQQ_FNS(fifo_expire);
 439CFQ_CFQQ_FNS(idle_window);
 440CFQ_CFQQ_FNS(prio_changed);
 441CFQ_CFQQ_FNS(slice_new);
 442CFQ_CFQQ_FNS(sync);
 443CFQ_CFQQ_FNS(coop);
 444CFQ_CFQQ_FNS(split_coop);
 445CFQ_CFQQ_FNS(deep);
 446CFQ_CFQQ_FNS(wait_busy);
 447#undef CFQ_CFQQ_FNS
 448
 449static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 450{
 451        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 452}
 453
 454static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 455{
 456        return pd_to_blkg(&cfqg->pd);
 457}
 458
 459#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 460
 461/* cfqg stats flags */
 462enum cfqg_stats_flags {
 463        CFQG_stats_waiting = 0,
 464        CFQG_stats_idling,
 465        CFQG_stats_empty,
 466};
 467
 468#define CFQG_FLAG_FNS(name)                                             \
 469static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
 470{                                                                       \
 471        stats->flags |= (1 << CFQG_stats_##name);                       \
 472}                                                                       \
 473static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
 474{                                                                       \
 475        stats->flags &= ~(1 << CFQG_stats_##name);                      \
 476}                                                                       \
 477static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
 478{                                                                       \
 479        return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
 480}                                                                       \
 481
 482CFQG_FLAG_FNS(waiting)
 483CFQG_FLAG_FNS(idling)
 484CFQG_FLAG_FNS(empty)
 485#undef CFQG_FLAG_FNS
 486
 487/* This should be called with the queue_lock held. */
 488static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 489{
 490        unsigned long long now;
 491
 492        if (!cfqg_stats_waiting(stats))
 493                return;
 494
 495        now = sched_clock();
 496        if (time_after64(now, stats->start_group_wait_time))
 497                blkg_stat_add(&stats->group_wait_time,
 498                              now - stats->start_group_wait_time);
 499        cfqg_stats_clear_waiting(stats);
 500}
 501
 502/* This should be called with the queue_lock held. */
 503static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 504                                                 struct cfq_group *curr_cfqg)
 505{
 506        struct cfqg_stats *stats = &cfqg->stats;
 507
 508        if (cfqg_stats_waiting(stats))
 509                return;
 510        if (cfqg == curr_cfqg)
 511                return;
 512        stats->start_group_wait_time = sched_clock();
 513        cfqg_stats_mark_waiting(stats);
 514}
 515
 516/* This should be called with the queue_lock held. */
 517static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 518{
 519        unsigned long long now;
 520
 521        if (!cfqg_stats_empty(stats))
 522                return;
 523
 524        now = sched_clock();
 525        if (time_after64(now, stats->start_empty_time))
 526                blkg_stat_add(&stats->empty_time,
 527                              now - stats->start_empty_time);
 528        cfqg_stats_clear_empty(stats);
 529}
 530
 531static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 532{
 533        blkg_stat_add(&cfqg->stats.dequeue, 1);
 534}
 535
 536static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 537{
 538        struct cfqg_stats *stats = &cfqg->stats;
 539
 540        if (blkg_rwstat_total(&stats->queued))
 541                return;
 542
 543        /*
 544         * group is already marked empty. This can happen if cfqq got new
 545         * request in parent group and moved to this group while being added
 546         * to service tree. Just ignore the event and move on.
 547         */
 548        if (cfqg_stats_empty(stats))
 549                return;
 550
 551        stats->start_empty_time = sched_clock();
 552        cfqg_stats_mark_empty(stats);
 553}
 554
 555static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 556{
 557        struct cfqg_stats *stats = &cfqg->stats;
 558
 559        if (cfqg_stats_idling(stats)) {
 560                unsigned long long now = sched_clock();
 561
 562                if (time_after64(now, stats->start_idle_time))
 563                        blkg_stat_add(&stats->idle_time,
 564                                      now - stats->start_idle_time);
 565                cfqg_stats_clear_idling(stats);
 566        }
 567}
 568
 569static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 570{
 571        struct cfqg_stats *stats = &cfqg->stats;
 572
 573        BUG_ON(cfqg_stats_idling(stats));
 574
 575        stats->start_idle_time = sched_clock();
 576        cfqg_stats_mark_idling(stats);
 577}
 578
 579static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 580{
 581        struct cfqg_stats *stats = &cfqg->stats;
 582
 583        blkg_stat_add(&stats->avg_queue_size_sum,
 584                      blkg_rwstat_total(&stats->queued));
 585        blkg_stat_add(&stats->avg_queue_size_samples, 1);
 586        cfqg_stats_update_group_wait_time(stats);
 587}
 588
 589#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 590
 591static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 592static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 593static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 594static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 595static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 596static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 597static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 598
 599#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 600
 601#ifdef CONFIG_CFQ_GROUP_IOSCHED
 602
 603static struct blkcg_policy blkcg_policy_cfq;
 604
 605static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 606{
 607        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 608}
 609
 610static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
 611{
 612        struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
 613
 614        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 615}
 616
 617static inline void cfqg_get(struct cfq_group *cfqg)
 618{
 619        return blkg_get(cfqg_to_blkg(cfqg));
 620}
 621
 622static inline void cfqg_put(struct cfq_group *cfqg)
 623{
 624        return blkg_put(cfqg_to_blkg(cfqg));
 625}
 626
 627#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
 628        char __pbuf[128];                                               \
 629                                                                        \
 630        blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
 631        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
 632                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
 633                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 634                          __pbuf, ##args);                              \
 635} while (0)
 636
 637#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
 638        char __pbuf[128];                                               \
 639                                                                        \
 640        blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
 641        blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
 642} while (0)
 643
 644static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 645                                            struct cfq_group *curr_cfqg, int rw)
 646{
 647        blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 648        cfqg_stats_end_empty_time(&cfqg->stats);
 649        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 650}
 651
 652static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 653                        unsigned long time, unsigned long unaccounted_time)
 654{
 655        blkg_stat_add(&cfqg->stats.time, time);
 656#ifdef CONFIG_DEBUG_BLK_CGROUP
 657        blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 658#endif
 659}
 660
 661static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 662{
 663        blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 664}
 665
 666static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 667{
 668        blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 669}
 670
 671static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 672                                              uint64_t bytes, int rw)
 673{
 674        blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
 675        blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
 676        blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 677}
 678
 679static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 680                        uint64_t start_time, uint64_t io_start_time, int rw)
 681{
 682        struct cfqg_stats *stats = &cfqg->stats;
 683        unsigned long long now = sched_clock();
 684
 685        if (time_after64(now, io_start_time))
 686                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 687        if (time_after64(io_start_time, start_time))
 688                blkg_rwstat_add(&stats->wait_time, rw,
 689                                io_start_time - start_time);
 690}
 691
 692/* @stats = 0 */
 693static void cfqg_stats_reset(struct cfqg_stats *stats)
 694{
 695        /* queued stats shouldn't be cleared */
 696        blkg_rwstat_reset(&stats->service_bytes);
 697        blkg_rwstat_reset(&stats->serviced);
 698        blkg_rwstat_reset(&stats->merged);
 699        blkg_rwstat_reset(&stats->service_time);
 700        blkg_rwstat_reset(&stats->wait_time);
 701        blkg_stat_reset(&stats->time);
 702#ifdef CONFIG_DEBUG_BLK_CGROUP
 703        blkg_stat_reset(&stats->unaccounted_time);
 704        blkg_stat_reset(&stats->avg_queue_size_sum);
 705        blkg_stat_reset(&stats->avg_queue_size_samples);
 706        blkg_stat_reset(&stats->dequeue);
 707        blkg_stat_reset(&stats->group_wait_time);
 708        blkg_stat_reset(&stats->idle_time);
 709        blkg_stat_reset(&stats->empty_time);
 710#endif
 711}
 712
 713/* @to += @from */
 714static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
 715{
 716        /* queued stats shouldn't be cleared */
 717        blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
 718        blkg_rwstat_merge(&to->serviced, &from->serviced);
 719        blkg_rwstat_merge(&to->merged, &from->merged);
 720        blkg_rwstat_merge(&to->service_time, &from->service_time);
 721        blkg_rwstat_merge(&to->wait_time, &from->wait_time);
 722        blkg_stat_merge(&from->time, &from->time);
 723#ifdef CONFIG_DEBUG_BLK_CGROUP
 724        blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
 725        blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 726        blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
 727        blkg_stat_merge(&to->dequeue, &from->dequeue);
 728        blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
 729        blkg_stat_merge(&to->idle_time, &from->idle_time);
 730        blkg_stat_merge(&to->empty_time, &from->empty_time);
 731#endif
 732}
 733
 734/*
 735 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 736 * recursive stats can still account for the amount used by this cfqg after
 737 * it's gone.
 738 */
 739static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 740{
 741        struct cfq_group *parent = cfqg_parent(cfqg);
 742
 743        lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
 744
 745        if (unlikely(!parent))
 746                return;
 747
 748        cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
 749        cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
 750        cfqg_stats_reset(&cfqg->stats);
 751        cfqg_stats_reset(&cfqg->dead_stats);
 752}
 753
 754#else   /* CONFIG_CFQ_GROUP_IOSCHED */
 755
 756static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 757static inline void cfqg_get(struct cfq_group *cfqg) { }
 758static inline void cfqg_put(struct cfq_group *cfqg) { }
 759
 760#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
 761        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
 762                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
 763                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 764                                ##args)
 765#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
 766
 767static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 768                        struct cfq_group *curr_cfqg, int rw) { }
 769static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 770                        unsigned long time, unsigned long unaccounted_time) { }
 771static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 772static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 773static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 774                                              uint64_t bytes, int rw) { }
 775static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 776                        uint64_t start_time, uint64_t io_start_time, int rw) { }
 777
 778#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 779
 780#define cfq_log(cfqd, fmt, args...)     \
 781        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 782
 783/* Traverses through cfq group service trees */
 784#define for_each_cfqg_st(cfqg, i, j, st) \
 785        for (i = 0; i <= IDLE_WORKLOAD; i++) \
 786                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 787                        : &cfqg->service_tree_idle; \
 788                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 789                        (i == IDLE_WORKLOAD && j == 0); \
 790                        j++, st = i < IDLE_WORKLOAD ? \
 791                        &cfqg->service_trees[i][j]: NULL) \
 792
 793static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 794        struct cfq_ttime *ttime, bool group_idle)
 795{
 796        unsigned long slice;
 797        if (!sample_valid(ttime->ttime_samples))
 798                return false;
 799        if (group_idle)
 800                slice = cfqd->cfq_group_idle;
 801        else
 802                slice = cfqd->cfq_slice_idle;
 803        return ttime->ttime_mean > slice;
 804}
 805
 806static inline bool iops_mode(struct cfq_data *cfqd)
 807{
 808        /*
 809         * If we are not idling on queues and it is a NCQ drive, parallel
 810         * execution of requests is on and measuring time is not possible
 811         * in most of the cases until and unless we drive shallower queue
 812         * depths and that becomes a performance bottleneck. In such cases
 813         * switch to start providing fairness in terms of number of IOs.
 814         */
 815        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 816                return true;
 817        else
 818                return false;
 819}
 820
 821static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 822{
 823        if (cfq_class_idle(cfqq))
 824                return IDLE_WORKLOAD;
 825        if (cfq_class_rt(cfqq))
 826                return RT_WORKLOAD;
 827        return BE_WORKLOAD;
 828}
 829
 830
 831static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 832{
 833        if (!cfq_cfqq_sync(cfqq))
 834                return ASYNC_WORKLOAD;
 835        if (!cfq_cfqq_idle_window(cfqq))
 836                return SYNC_NOIDLE_WORKLOAD;
 837        return SYNC_WORKLOAD;
 838}
 839
 840static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 841                                        struct cfq_data *cfqd,
 842                                        struct cfq_group *cfqg)
 843{
 844        if (wl_class == IDLE_WORKLOAD)
 845                return cfqg->service_tree_idle.count;
 846
 847        return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
 848                cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
 849                cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 850}
 851
 852static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 853                                        struct cfq_group *cfqg)
 854{
 855        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
 856                cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 857}
 858
 859static void cfq_dispatch_insert(struct request_queue *, struct request *);
 860static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 861                                       struct cfq_io_cq *cic, struct bio *bio,
 862                                       gfp_t gfp_mask);
 863
 864static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 865{
 866        /* cic->icq is the first member, %NULL will convert to %NULL */
 867        return container_of(icq, struct cfq_io_cq, icq);
 868}
 869
 870static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 871                                               struct io_context *ioc)
 872{
 873        if (ioc)
 874                return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 875        return NULL;
 876}
 877
 878static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 879{
 880        return cic->cfqq[is_sync];
 881}
 882
 883static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 884                                bool is_sync)
 885{
 886        cic->cfqq[is_sync] = cfqq;
 887}
 888
 889static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 890{
 891        return cic->icq.q->elevator->elevator_data;
 892}
 893
 894/*
 895 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 896 * set (in which case it could also be direct WRITE).
 897 */
 898static inline bool cfq_bio_sync(struct bio *bio)
 899{
 900        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 901}
 902
 903/*
 904 * scheduler run of queue, if there are requests pending and no one in the
 905 * driver that will restart queueing
 906 */
 907static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 908{
 909        if (cfqd->busy_queues) {
 910                cfq_log(cfqd, "schedule dispatch");
 911                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 912        }
 913}
 914
 915/*
 916 * Scale schedule slice based on io priority. Use the sync time slice only
 917 * if a queue is marked sync and has sync io queued. A sync queue with async
 918 * io only, should not get full sync slice length.
 919 */
 920static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 921                                 unsigned short prio)
 922{
 923        const int base_slice = cfqd->cfq_slice[sync];
 924
 925        WARN_ON(prio >= IOPRIO_BE_NR);
 926
 927        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 928}
 929
 930static inline int
 931cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 932{
 933        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 934}
 935
 936/**
 937 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 938 * @charge: disk time being charged
 939 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 940 *
 941 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 942 * scaling is inversely proportional.
 943 *
 944 * scaled = charge / vfraction
 945 *
 946 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 947 */
 948static inline u64 cfqg_scale_charge(unsigned long charge,
 949                                    unsigned int vfraction)
 950{
 951        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
 952
 953        /* charge / vfraction */
 954        c <<= CFQ_SERVICE_SHIFT;
 955        do_div(c, vfraction);
 956        return c;
 957}
 958
 959static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 960{
 961        s64 delta = (s64)(vdisktime - min_vdisktime);
 962        if (delta > 0)
 963                min_vdisktime = vdisktime;
 964
 965        return min_vdisktime;
 966}
 967
 968static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 969{
 970        s64 delta = (s64)(vdisktime - min_vdisktime);
 971        if (delta < 0)
 972                min_vdisktime = vdisktime;
 973
 974        return min_vdisktime;
 975}
 976
 977static void update_min_vdisktime(struct cfq_rb_root *st)
 978{
 979        struct cfq_group *cfqg;
 980
 981        if (st->left) {
 982                cfqg = rb_entry_cfqg(st->left);
 983                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 984                                                  cfqg->vdisktime);
 985        }
 986}
 987
 988/*
 989 * get averaged number of queues of RT/BE priority.
 990 * average is updated, with a formula that gives more weight to higher numbers,
 991 * to quickly follows sudden increases and decrease slowly
 992 */
 993
 994static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 995                                        struct cfq_group *cfqg, bool rt)
 996{
 997        unsigned min_q, max_q;
 998        unsigned mult  = cfq_hist_divisor - 1;
 999        unsigned round = cfq_hist_divisor / 2;
1000        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1001
1002        min_q = min(cfqg->busy_queues_avg[rt], busy);
1003        max_q = max(cfqg->busy_queues_avg[rt], busy);
1004        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1005                cfq_hist_divisor;
1006        return cfqg->busy_queues_avg[rt];
1007}
1008
1009static inline unsigned
1010cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1011{
1012        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1013}
1014
1015static inline unsigned
1016cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1017{
1018        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1019        if (cfqd->cfq_latency) {
1020                /*
1021                 * interested queues (we consider only the ones with the same
1022                 * priority class in the cfq group)
1023                 */
1024                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1025                                                cfq_class_rt(cfqq));
1026                unsigned sync_slice = cfqd->cfq_slice[1];
1027                unsigned expect_latency = sync_slice * iq;
1028                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1029
1030                if (expect_latency > group_slice) {
1031                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1032                        /* scale low_slice according to IO priority
1033                         * and sync vs async */
1034                        unsigned low_slice =
1035                                min(slice, base_low_slice * slice / sync_slice);
1036                        /* the adapted slice value is scaled to fit all iqs
1037                         * into the target latency */
1038                        slice = max(slice * group_slice / expect_latency,
1039                                    low_slice);
1040                }
1041        }
1042        return slice;
1043}
1044
1045static inline void
1046cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1047{
1048        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1049
1050        cfqq->slice_start = jiffies;
1051        cfqq->slice_end = jiffies + slice;
1052        cfqq->allocated_slice = slice;
1053        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1054}
1055
1056/*
1057 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1058 * isn't valid until the first request from the dispatch is activated
1059 * and the slice time set.
1060 */
1061static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1062{
1063        if (cfq_cfqq_slice_new(cfqq))
1064                return false;
1065        if (time_before(jiffies, cfqq->slice_end))
1066                return false;
1067
1068        return true;
1069}
1070
1071/*
1072 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1073 * We choose the request that is closest to the head right now. Distance
1074 * behind the head is penalized and only allowed to a certain extent.
1075 */
1076static struct request *
1077cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1078{
1079        sector_t s1, s2, d1 = 0, d2 = 0;
1080        unsigned long back_max;
1081#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
1082#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
1083        unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1084
1085        if (rq1 == NULL || rq1 == rq2)
1086                return rq2;
1087        if (rq2 == NULL)
1088                return rq1;
1089
1090        if (rq_is_sync(rq1) != rq_is_sync(rq2))
1091                return rq_is_sync(rq1) ? rq1 : rq2;
1092
1093        if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1094                return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1095
1096        s1 = blk_rq_pos(rq1);
1097        s2 = blk_rq_pos(rq2);
1098
1099        /*
1100         * by definition, 1KiB is 2 sectors
1101         */
1102        back_max = cfqd->cfq_back_max * 2;
1103
1104        /*
1105         * Strict one way elevator _except_ in the case where we allow
1106         * short backward seeks which are biased as twice the cost of a
1107         * similar forward seek.
1108         */
1109        if (s1 >= last)
1110                d1 = s1 - last;
1111        else if (s1 + back_max >= last)
1112                d1 = (last - s1) * cfqd->cfq_back_penalty;
1113        else
1114                wrap |= CFQ_RQ1_WRAP;
1115
1116        if (s2 >= last)
1117                d2 = s2 - last;
1118        else if (s2 + back_max >= last)
1119                d2 = (last - s2) * cfqd->cfq_back_penalty;
1120        else
1121                wrap |= CFQ_RQ2_WRAP;
1122
1123        /* Found required data */
1124
1125        /*
1126         * By doing switch() on the bit mask "wrap" we avoid having to
1127         * check two variables for all permutations: --> faster!
1128         */
1129        switch (wrap) {
1130        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1131                if (d1 < d2)
1132                        return rq1;
1133                else if (d2 < d1)
1134                        return rq2;
1135                else {
1136                        if (s1 >= s2)
1137                                return rq1;
1138                        else
1139                                return rq2;
1140                }
1141
1142        case CFQ_RQ2_WRAP:
1143                return rq1;
1144        case CFQ_RQ1_WRAP:
1145                return rq2;
1146        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1147        default:
1148                /*
1149                 * Since both rqs are wrapped,
1150                 * start with the one that's further behind head
1151                 * (--> only *one* back seek required),
1152                 * since back seek takes more time than forward.
1153                 */
1154                if (s1 <= s2)
1155                        return rq1;
1156                else
1157                        return rq2;
1158        }
1159}
1160
1161/*
1162 * The below is leftmost cache rbtree addon
1163 */
1164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1165{
1166        /* Service tree is empty */
1167        if (!root->count)
1168                return NULL;
1169
1170        if (!root->left)
1171                root->left = rb_first(&root->rb);
1172
1173        if (root->left)
1174                return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176        return NULL;
1177}
1178
1179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181        if (!root->left)
1182                root->left = rb_first(&root->rb);
1183
1184        if (root->left)
1185                return rb_entry_cfqg(root->left);
1186
1187        return NULL;
1188}
1189
1190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192        rb_erase(n, root);
1193        RB_CLEAR_NODE(n);
1194}
1195
1196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198        if (root->left == n)
1199                root->left = NULL;
1200        rb_erase_init(n, &root->rb);
1201        --root->count;
1202}
1203
1204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
1207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209                  struct request *last)
1210{
1211        struct rb_node *rbnext = rb_next(&last->rb_node);
1212        struct rb_node *rbprev = rb_prev(&last->rb_node);
1213        struct request *next = NULL, *prev = NULL;
1214
1215        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1216
1217        if (rbprev)
1218                prev = rb_entry_rq(rbprev);
1219
1220        if (rbnext)
1221                next = rb_entry_rq(rbnext);
1222        else {
1223                rbnext = rb_first(&cfqq->sort_list);
1224                if (rbnext && rbnext != &last->rb_node)
1225                        next = rb_entry_rq(rbnext);
1226        }
1227
1228        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1229}
1230
1231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232                                      struct cfq_queue *cfqq)
1233{
1234        /*
1235         * just an approximation, should be ok.
1236         */
1237        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1238                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1239}
1240
1241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244        return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250        struct rb_node **node = &st->rb.rb_node;
1251        struct rb_node *parent = NULL;
1252        struct cfq_group *__cfqg;
1253        s64 key = cfqg_key(st, cfqg);
1254        int left = 1;
1255
1256        while (*node != NULL) {
1257                parent = *node;
1258                __cfqg = rb_entry_cfqg(parent);
1259
1260                if (key < cfqg_key(st, __cfqg))
1261                        node = &parent->rb_left;
1262                else {
1263                        node = &parent->rb_right;
1264                        left = 0;
1265                }
1266        }
1267
1268        if (left)
1269                st->left = &cfqg->rb_node;
1270
1271        rb_link_node(&cfqg->rb_node, parent, node);
1272        rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
1275static void
1276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278        if (cfqg->new_weight) {
1279                cfqg->weight = cfqg->new_weight;
1280                cfqg->new_weight = 0;
1281        }
1282}
1283
1284static void
1285cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1286{
1287        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1288
1289        if (cfqg->new_leaf_weight) {
1290                cfqg->leaf_weight = cfqg->new_leaf_weight;
1291                cfqg->new_leaf_weight = 0;
1292        }
1293}
1294
1295static void
1296cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1297{
1298        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
1299        struct cfq_group *pos = cfqg;
1300        struct cfq_group *parent;
1301        bool propagate;
1302
1303        /* add to the service tree */
1304        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1305
1306        cfq_update_group_leaf_weight(cfqg);
1307        __cfq_group_service_tree_add(st, cfqg);
1308
1309        /*
1310         * Activate @cfqg and calculate the portion of vfraction @cfqg is
1311         * entitled to.  vfraction is calculated by walking the tree
1312         * towards the root calculating the fraction it has at each level.
1313         * The compounded ratio is how much vfraction @cfqg owns.
1314         *
1315         * Start with the proportion tasks in this cfqg has against active
1316         * children cfqgs - its leaf_weight against children_weight.
1317         */
1318        propagate = !pos->nr_active++;
1319        pos->children_weight += pos->leaf_weight;
1320        vfr = vfr * pos->leaf_weight / pos->children_weight;
1321
1322        /*
1323         * Compound ->weight walking up the tree.  Both activation and
1324         * vfraction calculation are done in the same loop.  Propagation
1325         * stops once an already activated node is met.  vfraction
1326         * calculation should always continue to the root.
1327         */
1328        while ((parent = cfqg_parent(pos))) {
1329                if (propagate) {
1330                        cfq_update_group_weight(pos);
1331                        propagate = !parent->nr_active++;
1332                        parent->children_weight += pos->weight;
1333                }
1334                vfr = vfr * pos->weight / parent->children_weight;
1335                pos = parent;
1336        }
1337
1338        cfqg->vfraction = max_t(unsigned, vfr, 1);
1339}
1340
1341static void
1342cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1343{
1344        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1345        struct cfq_group *__cfqg;
1346        struct rb_node *n;
1347
1348        cfqg->nr_cfqq++;
1349        if (!RB_EMPTY_NODE(&cfqg->rb_node))
1350                return;
1351
1352        /*
1353         * Currently put the group at the end. Later implement something
1354         * so that groups get lesser vtime based on their weights, so that
1355         * if group does not loose all if it was not continuously backlogged.
1356         */
1357        n = rb_last(&st->rb);
1358        if (n) {
1359                __cfqg = rb_entry_cfqg(n);
1360                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1361        } else
1362                cfqg->vdisktime = st->min_vdisktime;
1363        cfq_group_service_tree_add(st, cfqg);
1364}
1365
1366static void
1367cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1368{
1369        struct cfq_group *pos = cfqg;
1370        bool propagate;
1371
1372        /*
1373         * Undo activation from cfq_group_service_tree_add().  Deactivate
1374         * @cfqg and propagate deactivation upwards.
1375         */
1376        propagate = !--pos->nr_active;
1377        pos->children_weight -= pos->leaf_weight;
1378
1379        while (propagate) {
1380                struct cfq_group *parent = cfqg_parent(pos);
1381
1382                /* @pos has 0 nr_active at this point */
1383                WARN_ON_ONCE(pos->children_weight);
1384                pos->vfraction = 0;
1385
1386                if (!parent)
1387                        break;
1388
1389                propagate = !--parent->nr_active;
1390                parent->children_weight -= pos->weight;
1391                pos = parent;
1392        }
1393
1394        /* remove from the service tree */
1395        if (!RB_EMPTY_NODE(&cfqg->rb_node))
1396                cfq_rb_erase(&cfqg->rb_node, st);
1397}
1398
1399static void
1400cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1401{
1402        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1403
1404        BUG_ON(cfqg->nr_cfqq < 1);
1405        cfqg->nr_cfqq--;
1406
1407        /* If there are other cfq queues under this group, don't delete it */
1408        if (cfqg->nr_cfqq)
1409                return;
1410
1411        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1412        cfq_group_service_tree_del(st, cfqg);
1413        cfqg->saved_wl_slice = 0;
1414        cfqg_stats_update_dequeue(cfqg);
1415}
1416
1417static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1418                                                unsigned int *unaccounted_time)
1419{
1420        unsigned int slice_used;
1421
1422        /*
1423         * Queue got expired before even a single request completed or
1424         * got expired immediately after first request completion.
1425         */
1426        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1427                /*
1428                 * Also charge the seek time incurred to the group, otherwise
1429                 * if there are mutiple queues in the group, each can dispatch
1430                 * a single request on seeky media and cause lots of seek time
1431                 * and group will never know it.
1432                 */
1433                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1434                                        1);
1435        } else {
1436                slice_used = jiffies - cfqq->slice_start;
1437                if (slice_used > cfqq->allocated_slice) {
1438                        *unaccounted_time = slice_used - cfqq->allocated_slice;
1439                        slice_used = cfqq->allocated_slice;
1440                }
1441                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1442                        *unaccounted_time += cfqq->slice_start -
1443                                        cfqq->dispatch_start;
1444        }
1445
1446        return slice_used;
1447}
1448
1449static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1450                                struct cfq_queue *cfqq)
1451{
1452        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1453        unsigned int used_sl, charge, unaccounted_sl = 0;
1454        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1455                        - cfqg->service_tree_idle.count;
1456        unsigned int vfr;
1457
1458        BUG_ON(nr_sync < 0);
1459        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1460
1461        if (iops_mode(cfqd))
1462                charge = cfqq->slice_dispatch;
1463        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1464                charge = cfqq->allocated_slice;
1465
1466        /*
1467         * Can't update vdisktime while on service tree and cfqg->vfraction
1468         * is valid only while on it.  Cache vfr, leave the service tree,
1469         * update vdisktime and go back on.  The re-addition to the tree
1470         * will also update the weights as necessary.
1471         */
1472        vfr = cfqg->vfraction;
1473        cfq_group_service_tree_del(st, cfqg);
1474        cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1475        cfq_group_service_tree_add(st, cfqg);
1476
1477        /* This group is being expired. Save the context */
1478        if (time_after(cfqd->workload_expires, jiffies)) {
1479                cfqg->saved_wl_slice = cfqd->workload_expires
1480                                                - jiffies;
1481                cfqg->saved_wl_type = cfqd->serving_wl_type;
1482                cfqg->saved_wl_class = cfqd->serving_wl_class;
1483        } else
1484                cfqg->saved_wl_slice = 0;
1485
1486        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1487                                        st->min_vdisktime);
1488        cfq_log_cfqq(cfqq->cfqd, cfqq,
1489                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1490                     used_sl, cfqq->slice_dispatch, charge,
1491                     iops_mode(cfqd), cfqq->nr_sectors);
1492        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1493        cfqg_stats_set_start_empty_time(cfqg);
1494}
1495
1496/**
1497 * cfq_init_cfqg_base - initialize base part of a cfq_group
1498 * @cfqg: cfq_group to initialize
1499 *
1500 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1501 * is enabled or not.
1502 */
1503static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1504{
1505        struct cfq_rb_root *st;
1506        int i, j;
1507
1508        for_each_cfqg_st(cfqg, i, j, st)
1509                *st = CFQ_RB_ROOT;
1510        RB_CLEAR_NODE(&cfqg->rb_node);
1511
1512        cfqg->ttime.last_end_request = jiffies;
1513}
1514
1515#ifdef CONFIG_CFQ_GROUP_IOSCHED
1516static void cfq_pd_init(struct blkcg_gq *blkg)
1517{
1518        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1519
1520        cfq_init_cfqg_base(cfqg);
1521        cfqg->weight = blkg->blkcg->cfq_weight;
1522        cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
1523}
1524
1525static void cfq_pd_offline(struct blkcg_gq *blkg)
1526{
1527        /*
1528         * @blkg is going offline and will be ignored by
1529         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1530         * that they don't get lost.  If IOs complete after this point, the
1531         * stats for them will be lost.  Oh well...
1532         */
1533        cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1534}
1535
1536/* offset delta from cfqg->stats to cfqg->dead_stats */
1537static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1538                                        offsetof(struct cfq_group, stats);
1539
1540/* to be used by recursive prfill, sums live and dead stats recursively */
1541static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1542{
1543        u64 sum = 0;
1544
1545        sum += blkg_stat_recursive_sum(pd, off);
1546        sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1547        return sum;
1548}
1549
1550/* to be used by recursive prfill, sums live and dead rwstats recursively */
1551static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1552                                                       int off)
1553{
1554        struct blkg_rwstat a, b;
1555
1556        a = blkg_rwstat_recursive_sum(pd, off);
1557        b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1558        blkg_rwstat_merge(&a, &b);
1559        return a;
1560}
1561
1562static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1563{
1564        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1565
1566        cfqg_stats_reset(&cfqg->stats);
1567        cfqg_stats_reset(&cfqg->dead_stats);
1568}
1569
1570/*
1571 * Search for the cfq group current task belongs to. request_queue lock must
1572 * be held.
1573 */
1574static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1575                                                struct blkcg *blkcg)
1576{
1577        struct request_queue *q = cfqd->queue;
1578        struct cfq_group *cfqg = NULL;
1579
1580        /* avoid lookup for the common case where there's no blkcg */
1581        if (blkcg == &blkcg_root) {
1582                cfqg = cfqd->root_group;
1583        } else {
1584                struct blkcg_gq *blkg;
1585
1586                blkg = blkg_lookup_create(blkcg, q);
1587                if (!IS_ERR(blkg))
1588                        cfqg = blkg_to_cfqg(blkg);
1589        }
1590
1591        return cfqg;
1592}
1593
1594static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1595{
1596        /* Currently, all async queues are mapped to root group */
1597        if (!cfq_cfqq_sync(cfqq))
1598                cfqg = cfqq->cfqd->root_group;
1599
1600        cfqq->cfqg = cfqg;
1601        /* cfqq reference on cfqg */
1602        cfqg_get(cfqg);
1603}
1604
1605static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1606                                     struct blkg_policy_data *pd, int off)
1607{
1608        struct cfq_group *cfqg = pd_to_cfqg(pd);
1609
1610        if (!cfqg->dev_weight)
1611                return 0;
1612        return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1613}
1614
1615static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1616                                    struct seq_file *sf)
1617{
1618        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1619                          cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1620                          false);
1621        return 0;
1622}
1623
1624static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1625                                          struct blkg_policy_data *pd, int off)
1626{
1627        struct cfq_group *cfqg = pd_to_cfqg(pd);
1628
1629        if (!cfqg->dev_leaf_weight)
1630                return 0;
1631        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1632}
1633
1634static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
1635                                         struct cftype *cft,
1636                                         struct seq_file *sf)
1637{
1638        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1639                          cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
1640                          false);
1641        return 0;
1642}
1643
1644static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1645                            struct seq_file *sf)
1646{
1647        seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1648        return 0;
1649}
1650
1651static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
1652                                 struct seq_file *sf)
1653{
1654        seq_printf(sf, "%u\n",
1655                   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
1656        return 0;
1657}
1658
1659static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1660                                    const char *buf, bool is_leaf_weight)
1661{
1662        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1663        struct blkg_conf_ctx ctx;
1664        struct cfq_group *cfqg;
1665        int ret;
1666
1667        ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1668        if (ret)
1669                return ret;
1670
1671        ret = -EINVAL;
1672        cfqg = blkg_to_cfqg(ctx.blkg);
1673        if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1674                if (!is_leaf_weight) {
1675                        cfqg->dev_weight = ctx.v;
1676                        cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1677                } else {
1678                        cfqg->dev_leaf_weight = ctx.v;
1679                        cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1680                }
1681                ret = 0;
1682        }
1683
1684        blkg_conf_finish(&ctx);
1685        return ret;
1686}
1687
1688static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1689                                  const char *buf)
1690{
1691        return __cfqg_set_weight_device(cgrp, cft, buf, false);
1692}
1693
1694static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
1695                                       const char *buf)
1696{
1697        return __cfqg_set_weight_device(cgrp, cft, buf, true);
1698}
1699
1700static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
1701                            bool is_leaf_weight)
1702{
1703        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1704        struct blkcg_gq *blkg;
1705
1706        if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1707                return -EINVAL;
1708
1709        spin_lock_irq(&blkcg->lock);
1710
1711        if (!is_leaf_weight)
1712                blkcg->cfq_weight = val;
1713        else
1714                blkcg->cfq_leaf_weight = val;
1715
1716        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1717                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1718
1719                if (!cfqg)
1720                        continue;
1721
1722                if (!is_leaf_weight) {
1723                        if (!cfqg->dev_weight)
1724                                cfqg->new_weight = blkcg->cfq_weight;
1725                } else {
1726                        if (!cfqg->dev_leaf_weight)
1727                                cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1728                }
1729        }
1730
1731        spin_unlock_irq(&blkcg->lock);
1732        return 0;
1733}
1734
1735static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1736{
1737        return __cfq_set_weight(cgrp, cft, val, false);
1738}
1739
1740static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1741{
1742        return __cfq_set_weight(cgrp, cft, val, true);
1743}
1744
1745static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1746                           struct seq_file *sf)
1747{
1748        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1749
1750        blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1751                          cft->private, false);
1752        return 0;
1753}
1754
1755static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1756                             struct seq_file *sf)
1757{
1758        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1759
1760        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1761                          cft->private, true);
1762        return 0;
1763}
1764
1765static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1766                                      struct blkg_policy_data *pd, int off)
1767{
1768        u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1769
1770        return __blkg_prfill_u64(sf, pd, sum);
1771}
1772
1773static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1774                                        struct blkg_policy_data *pd, int off)
1775{
1776        struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1777
1778        return __blkg_prfill_rwstat(sf, pd, &sum);
1779}
1780
1781static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
1782                                     struct seq_file *sf)
1783{
1784        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1785
1786        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
1787                          &blkcg_policy_cfq, cft->private, false);
1788        return 0;
1789}
1790
1791static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
1792                                       struct seq_file *sf)
1793{
1794        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1795
1796        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
1797                          &blkcg_policy_cfq, cft->private, true);
1798        return 0;
1799}
1800
1801#ifdef CONFIG_DEBUG_BLK_CGROUP
1802static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1803                                      struct blkg_policy_data *pd, int off)
1804{
1805        struct cfq_group *cfqg = pd_to_cfqg(pd);
1806        u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1807        u64 v = 0;
1808
1809        if (samples) {
1810                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1811                v = div64_u64(v, samples);
1812        }
1813        __blkg_prfill_u64(sf, pd, v);
1814        return 0;
1815}
1816
1817/* print avg_queue_size */
1818static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1819                                     struct seq_file *sf)
1820{
1821        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1822
1823        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1824                          &blkcg_policy_cfq, 0, false);
1825        return 0;
1826}
1827#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1828
1829static struct cftype cfq_blkcg_files[] = {
1830        /* on root, weight is mapped to leaf_weight */
1831        {
1832                .name = "weight_device",
1833                .flags = CFTYPE_ONLY_ON_ROOT,
1834                .read_seq_string = cfqg_print_leaf_weight_device,
1835                .write_string = cfqg_set_leaf_weight_device,
1836                .max_write_len = 256,
1837        },
1838        {
1839                .name = "weight",
1840                .flags = CFTYPE_ONLY_ON_ROOT,
1841                .read_seq_string = cfq_print_leaf_weight,
1842                .write_u64 = cfq_set_leaf_weight,
1843        },
1844
1845        /* no such mapping necessary for !roots */
1846        {
1847                .name = "weight_device",
1848                .flags = CFTYPE_NOT_ON_ROOT,
1849                .read_seq_string = cfqg_print_weight_device,
1850                .write_string = cfqg_set_weight_device,
1851                .max_write_len = 256,
1852        },
1853        {
1854                .name = "weight",
1855                .flags = CFTYPE_NOT_ON_ROOT,
1856                .read_seq_string = cfq_print_weight,
1857                .write_u64 = cfq_set_weight,
1858        },
1859
1860        {
1861                .name = "leaf_weight_device",
1862                .read_seq_string = cfqg_print_leaf_weight_device,
1863                .write_string = cfqg_set_leaf_weight_device,
1864                .max_write_len = 256,
1865        },
1866        {
1867                .name = "leaf_weight",
1868                .read_seq_string = cfq_print_leaf_weight,
1869                .write_u64 = cfq_set_leaf_weight,
1870        },
1871
1872        /* statistics, covers only the tasks in the cfqg */
1873        {
1874                .name = "time",
1875                .private = offsetof(struct cfq_group, stats.time),
1876                .read_seq_string = cfqg_print_stat,
1877        },
1878        {
1879                .name = "sectors",
1880                .private = offsetof(struct cfq_group, stats.sectors),
1881                .read_seq_string = cfqg_print_stat,
1882        },
1883        {
1884                .name = "io_service_bytes",
1885                .private = offsetof(struct cfq_group, stats.service_bytes),
1886                .read_seq_string = cfqg_print_rwstat,
1887        },
1888        {
1889                .name = "io_serviced",
1890                .private = offsetof(struct cfq_group, stats.serviced),
1891                .read_seq_string = cfqg_print_rwstat,
1892        },
1893        {
1894                .name = "io_service_time",
1895                .private = offsetof(struct cfq_group, stats.service_time),
1896                .read_seq_string = cfqg_print_rwstat,
1897        },
1898        {
1899                .name = "io_wait_time",
1900                .private = offsetof(struct cfq_group, stats.wait_time),
1901                .read_seq_string = cfqg_print_rwstat,
1902        },
1903        {
1904                .name = "io_merged",
1905                .private = offsetof(struct cfq_group, stats.merged),
1906                .read_seq_string = cfqg_print_rwstat,
1907        },
1908        {
1909                .name = "io_queued",
1910                .private = offsetof(struct cfq_group, stats.queued),
1911                .read_seq_string = cfqg_print_rwstat,
1912        },
1913
1914        /* the same statictics which cover the cfqg and its descendants */
1915        {
1916                .name = "time_recursive",
1917                .private = offsetof(struct cfq_group, stats.time),
1918                .read_seq_string = cfqg_print_stat_recursive,
1919        },
1920        {
1921                .name = "sectors_recursive",
1922                .private = offsetof(struct cfq_group, stats.sectors),
1923                .read_seq_string = cfqg_print_stat_recursive,
1924        },
1925        {
1926                .name = "io_service_bytes_recursive",
1927                .private = offsetof(struct cfq_group, stats.service_bytes),
1928                .read_seq_string = cfqg_print_rwstat_recursive,
1929        },
1930        {
1931                .name = "io_serviced_recursive",
1932                .private = offsetof(struct cfq_group, stats.serviced),
1933                .read_seq_string = cfqg_print_rwstat_recursive,
1934        },
1935        {
1936                .name = "io_service_time_recursive",
1937                .private = offsetof(struct cfq_group, stats.service_time),
1938                .read_seq_string = cfqg_print_rwstat_recursive,
1939        },
1940        {
1941                .name = "io_wait_time_recursive",
1942                .private = offsetof(struct cfq_group, stats.wait_time),
1943                .read_seq_string = cfqg_print_rwstat_recursive,
1944        },
1945        {
1946                .name = "io_merged_recursive",
1947                .private = offsetof(struct cfq_group, stats.merged),
1948                .read_seq_string = cfqg_print_rwstat_recursive,
1949        },
1950        {
1951                .name = "io_queued_recursive",
1952                .private = offsetof(struct cfq_group, stats.queued),
1953                .read_seq_string = cfqg_print_rwstat_recursive,
1954        },
1955#ifdef CONFIG_DEBUG_BLK_CGROUP
1956        {
1957                .name = "avg_queue_size",
1958                .read_seq_string = cfqg_print_avg_queue_size,
1959        },
1960        {
1961                .name = "group_wait_time",
1962                .private = offsetof(struct cfq_group, stats.group_wait_time),
1963                .read_seq_string = cfqg_print_stat,
1964        },
1965        {
1966                .name = "idle_time",
1967                .private = offsetof(struct cfq_group, stats.idle_time),
1968                .read_seq_string = cfqg_print_stat,
1969        },
1970        {
1971                .name = "empty_time",
1972                .private = offsetof(struct cfq_group, stats.empty_time),
1973                .read_seq_string = cfqg_print_stat,
1974        },
1975        {
1976                .name = "dequeue",
1977                .private = offsetof(struct cfq_group, stats.dequeue),
1978                .read_seq_string = cfqg_print_stat,
1979        },
1980        {
1981                .name = "unaccounted_time",
1982                .private = offsetof(struct cfq_group, stats.unaccounted_time),
1983                .read_seq_string = cfqg_print_stat,
1984        },
1985#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1986        { }     /* terminate */
1987};
1988#else /* GROUP_IOSCHED */
1989static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1990                                                struct blkcg *blkcg)
1991{
1992        return cfqd->root_group;
1993}
1994
1995static inline void
1996cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1997        cfqq->cfqg = cfqg;
1998}
1999
2000#endif /* GROUP_IOSCHED */
2001
2002/*
2003 * The cfqd->service_trees holds all pending cfq_queue's that have
2004 * requests waiting to be processed. It is sorted in the order that
2005 * we will service the queues.
2006 */
2007static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2008                                 bool add_front)
2009{
2010        struct rb_node **p, *parent;
2011        struct cfq_queue *__cfqq;
2012        unsigned long rb_key;
2013        struct cfq_rb_root *st;
2014        int left;
2015        int new_cfqq = 1;
2016
2017        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2018        if (cfq_class_idle(cfqq)) {
2019                rb_key = CFQ_IDLE_DELAY;
2020                parent = rb_last(&st->rb);
2021                if (parent && parent != &cfqq->rb_node) {
2022                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2023                        rb_key += __cfqq->rb_key;
2024                } else
2025                        rb_key += jiffies;
2026        } else if (!add_front) {
2027                /*
2028                 * Get our rb key offset. Subtract any residual slice
2029                 * value carried from last service. A negative resid
2030                 * count indicates slice overrun, and this should position
2031                 * the next service time further away in the tree.
2032                 */
2033                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2034                rb_key -= cfqq->slice_resid;
2035                cfqq->slice_resid = 0;
2036        } else {
2037                rb_key = -HZ;
2038                __cfqq = cfq_rb_first(st);
2039                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2040        }
2041
2042        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2043                new_cfqq = 0;
2044                /*
2045                 * same position, nothing more to do
2046                 */
2047                if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2048                        return;
2049
2050                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2051                cfqq->service_tree = NULL;
2052        }
2053
2054        left = 1;
2055        parent = NULL;
2056        cfqq->service_tree = st;
2057        p = &st->rb.rb_node;
2058        while (*p) {
2059                parent = *p;
2060                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2061
2062                /*
2063                 * sort by key, that represents service time.
2064                 */
2065                if (time_before(rb_key, __cfqq->rb_key))
2066                        p = &parent->rb_left;
2067                else {
2068                        p = &parent->rb_right;
2069                        left = 0;
2070                }
2071        }
2072
2073        if (left)
2074                st->left = &cfqq->rb_node;
2075
2076        cfqq->rb_key = rb_key;
2077        rb_link_node(&cfqq->rb_node, parent, p);
2078        rb_insert_color(&cfqq->rb_node, &st->rb);
2079        st->count++;
2080        if (add_front || !new_cfqq)
2081                return;
2082        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2083}
2084
2085static struct cfq_queue *
2086cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2087                     sector_t sector, struct rb_node **ret_parent,
2088                     struct rb_node ***rb_link)
2089{
2090        struct rb_node **p, *parent;
2091        struct cfq_queue *cfqq = NULL;
2092
2093        parent = NULL;
2094        p = &root->rb_node;
2095        while (*p) {
2096                struct rb_node **n;
2097
2098                parent = *p;
2099                cfqq = rb_entry(parent, struct cfq_queue, p_node);
2100
2101                /*
2102                 * Sort strictly based on sector.  Smallest to the left,
2103                 * largest to the right.
2104                 */
2105                if (sector > blk_rq_pos(cfqq->next_rq))
2106                        n = &(*p)->rb_right;
2107                else if (sector < blk_rq_pos(cfqq->next_rq))
2108                        n = &(*p)->rb_left;
2109                else
2110                        break;
2111                p = n;
2112                cfqq = NULL;
2113        }
2114
2115        *ret_parent = parent;
2116        if (rb_link)
2117                *rb_link = p;
2118        return cfqq;
2119}
2120
2121static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2122{
2123        struct rb_node **p, *parent;
2124        struct cfq_queue *__cfqq;
2125
2126        if (cfqq->p_root) {
2127                rb_erase(&cfqq->p_node, cfqq->p_root);
2128                cfqq->p_root = NULL;
2129        }
2130
2131        if (cfq_class_idle(cfqq))
2132                return;
2133        if (!cfqq->next_rq)
2134                return;
2135
2136        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2137        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2138                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
2139        if (!__cfqq) {
2140                rb_link_node(&cfqq->p_node, parent, p);
2141                rb_insert_color(&cfqq->p_node, cfqq->p_root);
2142        } else
2143                cfqq->p_root = NULL;
2144}
2145
2146/*
2147 * Update cfqq's position in the service tree.
2148 */
2149static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2150{
2151        /*
2152         * Resorting requires the cfqq to be on the RR list already.
2153         */
2154        if (cfq_cfqq_on_rr(cfqq)) {
2155                cfq_service_tree_add(cfqd, cfqq, 0);
2156                cfq_prio_tree_add(cfqd, cfqq);
2157        }
2158}
2159
2160/*
2161 * add to busy list of queues for service, trying to be fair in ordering
2162 * the pending list according to last request service
2163 */
2164static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2165{
2166        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2167        BUG_ON(cfq_cfqq_on_rr(cfqq));
2168        cfq_mark_cfqq_on_rr(cfqq);
2169        cfqd->busy_queues++;
2170        if (cfq_cfqq_sync(cfqq))
2171                cfqd->busy_sync_queues++;
2172
2173        cfq_resort_rr_list(cfqd, cfqq);
2174}
2175
2176/*
2177 * Called when the cfqq no longer has requests pending, remove it from
2178 * the service tree.
2179 */
2180static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2181{
2182        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2183        BUG_ON(!cfq_cfqq_on_rr(cfqq));
2184        cfq_clear_cfqq_on_rr(cfqq);
2185
2186        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2187                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2188                cfqq->service_tree = NULL;
2189        }
2190        if (cfqq->p_root) {
2191                rb_erase(&cfqq->p_node, cfqq->p_root);
2192                cfqq->p_root = NULL;
2193        }
2194
2195        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2196        BUG_ON(!cfqd->busy_queues);
2197        cfqd->busy_queues--;
2198        if (cfq_cfqq_sync(cfqq))
2199                cfqd->busy_sync_queues--;
2200}
2201
2202/*
2203 * rb tree support functions
2204 */
2205static void cfq_del_rq_rb(struct request *rq)
2206{
2207        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2208        const int sync = rq_is_sync(rq);
2209
2210        BUG_ON(!cfqq->queued[sync]);
2211        cfqq->queued[sync]--;
2212
2213        elv_rb_del(&cfqq->sort_list, rq);
2214
2215        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2216                /*
2217                 * Queue will be deleted from service tree when we actually
2218                 * expire it later. Right now just remove it from prio tree
2219                 * as it is empty.
2220                 */
2221                if (cfqq->p_root) {
2222                        rb_erase(&cfqq->p_node, cfqq->p_root);
2223                        cfqq->p_root = NULL;
2224                }
2225        }
2226}
2227
2228static void cfq_add_rq_rb(struct request *rq)
2229{
2230        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2231        struct cfq_data *cfqd = cfqq->cfqd;
2232        struct request *prev;
2233
2234        cfqq->queued[rq_is_sync(rq)]++;
2235
2236        elv_rb_add(&cfqq->sort_list, rq);
2237
2238        if (!cfq_cfqq_on_rr(cfqq))
2239                cfq_add_cfqq_rr(cfqd, cfqq);
2240
2241        /*
2242         * check if this request is a better next-serve candidate
2243         */
2244        prev = cfqq->next_rq;
2245        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2246
2247        /*
2248         * adjust priority tree position, if ->next_rq changes
2249         */
2250        if (prev != cfqq->next_rq)
2251                cfq_prio_tree_add(cfqd, cfqq);
2252
2253        BUG_ON(!cfqq->next_rq);
2254}
2255
2256static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2257{
2258        elv_rb_del(&cfqq->sort_list, rq);
2259        cfqq->queued[rq_is_sync(rq)]--;
2260        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2261        cfq_add_rq_rb(rq);
2262        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2263                                 rq->cmd_flags);
2264}
2265
2266static struct request *
2267cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2268{
2269        struct task_struct *tsk = current;
2270        struct cfq_io_cq *cic;
2271        struct cfq_queue *cfqq;
2272
2273        cic = cfq_cic_lookup(cfqd, tsk->io_context);
2274        if (!cic)
2275                return NULL;
2276
2277        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2278        if (cfqq)
2279                return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2280
2281        return NULL;
2282}
2283
2284static void cfq_activate_request(struct request_queue *q, struct request *rq)
2285{
2286        struct cfq_data *cfqd = q->elevator->elevator_data;
2287
2288        cfqd->rq_in_driver++;
2289        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2290                                                cfqd->rq_in_driver);
2291
2292        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2293}
2294
2295static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2296{
2297        struct cfq_data *cfqd = q->elevator->elevator_data;
2298
2299        WARN_ON(!cfqd->rq_in_driver);
2300        cfqd->rq_in_driver--;
2301        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2302                                                cfqd->rq_in_driver);
2303}
2304
2305static void cfq_remove_request(struct request *rq)
2306{
2307        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2308
2309        if (cfqq->next_rq == rq)
2310                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2311
2312        list_del_init(&rq->queuelist);
2313        cfq_del_rq_rb(rq);
2314
2315        cfqq->cfqd->rq_queued--;
2316        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2317        if (rq->cmd_flags & REQ_PRIO) {
2318                WARN_ON(!cfqq->prio_pending);
2319                cfqq->prio_pending--;
2320        }
2321}
2322
2323static int cfq_merge(struct request_queue *q, struct request **req,
2324                     struct bio *bio)
2325{
2326        struct cfq_data *cfqd = q->elevator->elevator_data;
2327        struct request *__rq;
2328
2329        __rq = cfq_find_rq_fmerge(cfqd, bio);
2330        if (__rq && elv_rq_merge_ok(__rq, bio)) {
2331                *req = __rq;
2332                return ELEVATOR_FRONT_MERGE;
2333        }
2334
2335        return ELEVATOR_NO_MERGE;
2336}
2337
2338static void cfq_merged_request(struct request_queue *q, struct request *req,
2339                               int type)
2340{
2341        if (type == ELEVATOR_FRONT_MERGE) {
2342                struct cfq_queue *cfqq = RQ_CFQQ(req);
2343
2344                cfq_reposition_rq_rb(cfqq, req);
2345        }
2346}
2347
2348static void cfq_bio_merged(struct request_queue *q, struct request *req,
2349                                struct bio *bio)
2350{
2351        cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2352}
2353
2354static void
2355cfq_merged_requests(struct request_queue *q, struct request *rq,
2356                    struct request *next)
2357{
2358        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2359        struct cfq_data *cfqd = q->elevator->elevator_data;
2360
2361        /*
2362         * reposition in fifo if next is older than rq
2363         */
2364        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2365            time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
2366            cfqq == RQ_CFQQ(next)) {
2367                list_move(&rq->queuelist, &next->queuelist);
2368                rq_set_fifo_time(rq, rq_fifo_time(next));
2369        }
2370
2371        if (cfqq->next_rq == next)
2372                cfqq->next_rq = rq;
2373        cfq_remove_request(next);
2374        cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2375
2376        cfqq = RQ_CFQQ(next);
2377        /*
2378         * all requests of this queue are merged to other queues, delete it
2379         * from the service tree. If it's the active_queue,
2380         * cfq_dispatch_requests() will choose to expire it or do idle
2381         */
2382        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2383            cfqq != cfqd->active_queue)
2384                cfq_del_cfqq_rr(cfqd, cfqq);
2385}
2386
2387static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2388                           struct bio *bio)
2389{
2390        struct cfq_data *cfqd = q->elevator->elevator_data;
2391        struct cfq_io_cq *cic;
2392        struct cfq_queue *cfqq;
2393
2394        /*
2395         * Disallow merge of a sync bio into an async request.
2396         */
2397        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2398                return false;
2399
2400        /*
2401         * Lookup the cfqq that this bio will be queued with and allow
2402         * merge only if rq is queued there.
2403         */
2404        cic = cfq_cic_lookup(cfqd, current->io_context);
2405        if (!cic)
2406                return false;
2407
2408        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2409        return cfqq == RQ_CFQQ(rq);
2410}
2411
2412static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2413{
2414        del_timer(&cfqd->idle_slice_timer);
2415        cfqg_stats_update_idle_time(cfqq->cfqg);
2416}
2417
2418static void __cfq_set_active_queue(struct cfq_data *cfqd,
2419                                   struct cfq_queue *cfqq)
2420{
2421        if (cfqq) {
2422                cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2423                                cfqd->serving_wl_class, cfqd->serving_wl_type);
2424                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2425                cfqq->slice_start = 0;
2426                cfqq->dispatch_start = jiffies;
2427                cfqq->allocated_slice = 0;
2428                cfqq->slice_end = 0;
2429                cfqq->slice_dispatch = 0;
2430                cfqq->nr_sectors = 0;
2431
2432                cfq_clear_cfqq_wait_request(cfqq);
2433                cfq_clear_cfqq_must_dispatch(cfqq);
2434                cfq_clear_cfqq_must_alloc_slice(cfqq);
2435                cfq_clear_cfqq_fifo_expire(cfqq);
2436                cfq_mark_cfqq_slice_new(cfqq);
2437
2438                cfq_del_timer(cfqd, cfqq);
2439        }
2440
2441        cfqd->active_queue = cfqq;
2442}
2443
2444/*
2445 * current cfqq expired its slice (or was too idle), select new one
2446 */
2447static void
2448__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2449                    bool timed_out)
2450{
2451        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2452
2453        if (cfq_cfqq_wait_request(cfqq))
2454                cfq_del_timer(cfqd, cfqq);
2455
2456        cfq_clear_cfqq_wait_request(cfqq);
2457        cfq_clear_cfqq_wait_busy(cfqq);
2458
2459        /*
2460         * If this cfqq is shared between multiple processes, check to
2461         * make sure that those processes are still issuing I/Os within
2462         * the mean seek distance.  If not, it may be time to break the
2463         * queues apart again.
2464         */
2465        if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2466                cfq_mark_cfqq_split_coop(cfqq);
2467
2468        /*
2469         * store what was left of this slice, if the queue idled/timed out
2470         */
2471        if (timed_out) {
2472                if (cfq_cfqq_slice_new(cfqq))
2473                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2474                else
2475                        cfqq->slice_resid = cfqq->slice_end - jiffies;
2476                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2477        }
2478
2479        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2480
2481        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2482                cfq_del_cfqq_rr(cfqd, cfqq);
2483
2484        cfq_resort_rr_list(cfqd, cfqq);
2485
2486        if (cfqq == cfqd->active_queue)
2487                cfqd->active_queue = NULL;
2488
2489        if (cfqd->active_cic) {
2490                put_io_context(cfqd->active_cic->icq.ioc);
2491                cfqd->active_cic = NULL;
2492        }
2493}
2494
2495static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2496{
2497        struct cfq_queue *cfqq = cfqd->active_queue;
2498
2499        if (cfqq)
2500                __cfq_slice_expired(cfqd, cfqq, timed_out);
2501}
2502
2503/*
2504 * Get next queue for service. Unless we have a queue preemption,
2505 * we'll simply select the first cfqq in the service tree.
2506 */
2507static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2508{
2509        struct cfq_rb_root *st = st_for(cfqd->serving_group,
2510                        cfqd->serving_wl_class, cfqd->serving_wl_type);
2511
2512        if (!cfqd->rq_queued)
2513                return NULL;
2514
2515        /* There is nothing to dispatch */
2516        if (!st)
2517                return NULL;
2518        if (RB_EMPTY_ROOT(&st->rb))
2519                return NULL;
2520        return cfq_rb_first(st);
2521}
2522
2523static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2524{
2525        struct cfq_group *cfqg;
2526        struct cfq_queue *cfqq;
2527        int i, j;
2528        struct cfq_rb_root *st;
2529
2530        if (!cfqd->rq_queued)
2531                return NULL;
2532
2533        cfqg = cfq_get_next_cfqg(cfqd);
2534        if (!cfqg)
2535                return NULL;
2536
2537        for_each_cfqg_st(cfqg, i, j, st)
2538                if ((cfqq = cfq_rb_first(st)) != NULL)
2539                        return cfqq;
2540        return NULL;
2541}
2542
2543/*
2544 * Get and set a new active queue for service.
2545 */
2546static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2547                                              struct cfq_queue *cfqq)
2548{
2549        if (!cfqq)
2550                cfqq = cfq_get_next_queue(cfqd);
2551
2552        __cfq_set_active_queue(cfqd, cfqq);
2553        return cfqq;
2554}
2555
2556static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2557                                          struct request *rq)
2558{
2559        if (blk_rq_pos(rq) >= cfqd->last_position)
2560                return blk_rq_pos(rq) - cfqd->last_position;
2561        else
2562                return cfqd->last_position - blk_rq_pos(rq);
2563}
2564
2565static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2566                               struct request *rq)
2567{
2568        return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2569}
2570
2571static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2572                                    struct cfq_queue *cur_cfqq)
2573{
2574        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2575        struct rb_node *parent, *node;
2576        struct cfq_queue *__cfqq;
2577        sector_t sector = cfqd->last_position;
2578
2579        if (RB_EMPTY_ROOT(root))
2580                return NULL;
2581
2582        /*
2583         * First, if we find a request starting at the end of the last
2584         * request, choose it.
2585         */
2586        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2587        if (__cfqq)
2588                return __cfqq;
2589
2590        /*
2591         * If the exact sector wasn't found, the parent of the NULL leaf
2592         * will contain the closest sector.
2593         */
2594        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2595        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2596                return __cfqq;
2597
2598        if (blk_rq_pos(__cfqq->next_rq) < sector)
2599                node = rb_next(&__cfqq->p_node);
2600        else
2601                node = rb_prev(&__cfqq->p_node);
2602        if (!node)
2603                return NULL;
2604
2605        __cfqq = rb_entry(node, struct cfq_queue, p_node);
2606        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2607                return __cfqq;
2608
2609        return NULL;
2610}
2611
2612/*
2613 * cfqd - obvious
2614 * cur_cfqq - passed in so that we don't decide that the current queue is
2615 *            closely cooperating with itself.
2616 *
2617 * So, basically we're assuming that that cur_cfqq has dispatched at least
2618 * one request, and that cfqd->last_position reflects a position on the disk
2619 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2620 * assumption.
2621 */
2622static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2623                                              struct cfq_queue *cur_cfqq)
2624{
2625        struct cfq_queue *cfqq;
2626
2627        if (cfq_class_idle(cur_cfqq))
2628                return NULL;
2629        if (!cfq_cfqq_sync(cur_cfqq))
2630                return NULL;
2631        if (CFQQ_SEEKY(cur_cfqq))
2632                return NULL;
2633
2634        /*
2635         * Don't search priority tree if it's the only queue in the group.
2636         */
2637        if (cur_cfqq->cfqg->nr_cfqq == 1)
2638                return NULL;
2639
2640        /*
2641         * We should notice if some of the queues are cooperating, eg
2642         * working closely on the same area of the disk. In that case,
2643         * we can group them together and don't waste time idling.
2644         */
2645        cfqq = cfqq_close(cfqd, cur_cfqq);
2646        if (!cfqq)
2647                return NULL;
2648
2649        /* If new queue belongs to different cfq_group, don't choose it */
2650        if (cur_cfqq->cfqg != cfqq->cfqg)
2651                return NULL;
2652
2653        /*
2654         * It only makes sense to merge sync queues.
2655         */
2656        if (!cfq_cfqq_sync(cfqq))
2657                return NULL;
2658        if (CFQQ_SEEKY(cfqq))
2659                return NULL;
2660
2661        /*
2662         * Do not merge queues of different priority classes
2663         */
2664        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2665                return NULL;
2666
2667        return cfqq;
2668}
2669
2670/*
2671 * Determine whether we should enforce idle window for this queue.
2672 */
2673
2674static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2675{
2676        enum wl_class_t wl_class = cfqq_class(cfqq);
2677        struct cfq_rb_root *st = cfqq->service_tree;
2678
2679        BUG_ON(!st);
2680        BUG_ON(!st->count);
2681
2682        if (!cfqd->cfq_slice_idle)
2683                return false;
2684
2685        /* We never do for idle class queues. */
2686        if (wl_class == IDLE_WORKLOAD)
2687                return false;
2688
2689        /* We do for queues that were marked with idle window flag. */
2690        if (cfq_cfqq_idle_window(cfqq) &&
2691           !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2692                return true;
2693
2694        /*
2695         * Otherwise, we do only if they are the last ones
2696         * in their service tree.
2697         */
2698        if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2699           !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2700                return true;
2701        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2702        return false;
2703}
2704
2705static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2706{
2707        struct cfq_queue *cfqq = cfqd->active_queue;
2708        struct cfq_io_cq *cic;
2709        unsigned long sl, group_idle = 0;
2710
2711        /*
2712         * SSD device without seek penalty, disable idling. But only do so
2713         * for devices that support queuing, otherwise we still have a problem
2714         * with sync vs async workloads.
2715         */
2716        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2717                return;
2718
2719        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2720        WARN_ON(cfq_cfqq_slice_new(cfqq));
2721
2722        /*
2723         * idle is disabled, either manually or by past process history
2724         */
2725        if (!cfq_should_idle(cfqd, cfqq)) {
2726                /* no queue idling. Check for group idling */
2727                if (cfqd->cfq_group_idle)
2728                        group_idle = cfqd->cfq_group_idle;
2729                else
2730                        return;
2731        }
2732
2733        /*
2734         * still active requests from this queue, don't idle
2735         */
2736        if (cfqq->dispatched)
2737                return;
2738
2739        /*
2740         * task has exited, don't wait
2741         */
2742        cic = cfqd->active_cic;
2743        if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2744                return;
2745
2746        /*
2747         * If our average think time is larger than the remaining time
2748         * slice, then don't idle. This avoids overrunning the allotted
2749         * time slice.
2750         */
2751        if (sample_valid(cic->ttime.ttime_samples) &&
2752            (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2753                cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2754                             cic->ttime.ttime_mean);
2755                return;
2756        }
2757
2758        /* There are other queues in the group, don't do group idle */
2759        if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2760                return;
2761
2762        cfq_mark_cfqq_wait_request(cfqq);
2763
2764        if (group_idle)
2765                sl = cfqd->cfq_group_idle;
2766        else
2767                sl = cfqd->cfq_slice_idle;
2768
2769        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2770        cfqg_stats_set_start_idle_time(cfqq->cfqg);
2771        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2772                        group_idle ? 1 : 0);
2773}
2774
2775/*
2776 * Move request from internal lists to the request queue dispatch list.
2777 */
2778static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2779{
2780        struct cfq_data *cfqd = q->elevator->elevator_data;
2781        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2782
2783        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2784
2785        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2786        cfq_remove_request(rq);
2787        cfqq->dispatched++;
2788        (RQ_CFQG(rq))->dispatched++;
2789        elv_dispatch_sort(q, rq);
2790
2791        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2792        cfqq->nr_sectors += blk_rq_sectors(rq);
2793        cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2794}
2795
2796/*
2797 * return expired entry, or NULL to just start from scratch in rbtree
2798 */
2799static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2800{
2801        struct request *rq = NULL;
2802
2803        if (cfq_cfqq_fifo_expire(cfqq))
2804                return NULL;
2805
2806        cfq_mark_cfqq_fifo_expire(cfqq);
2807
2808        if (list_empty(&cfqq->fifo))
2809                return NULL;
2810
2811        rq = rq_entry_fifo(cfqq->fifo.next);
2812        if (time_before(jiffies, rq_fifo_time(rq)))
2813                rq = NULL;
2814
2815        return rq;
2816}
2817
2818static inline int
2819cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2820{
2821        const int base_rq = cfqd->cfq_slice_async_rq;
2822
2823        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2824
2825        return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2826}
2827
2828/*
2829 * Must be called with the queue_lock held.
2830 */
2831static int cfqq_process_refs(struct cfq_queue *cfqq)
2832{
2833        int process_refs, io_refs;
2834
2835        io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2836        process_refs = cfqq->ref - io_refs;
2837        BUG_ON(process_refs < 0);
2838        return process_refs;
2839}
2840
2841static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2842{
2843        int process_refs, new_process_refs;
2844        struct cfq_queue *__cfqq;
2845
2846        /*
2847         * If there are no process references on the new_cfqq, then it is
2848         * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2849         * chain may have dropped their last reference (not just their
2850         * last process reference).
2851         */
2852        if (!cfqq_process_refs(new_cfqq))
2853                return;
2854
2855        /* Avoid a circular list and skip interim queue merges */
2856        while ((__cfqq = new_cfqq->new_cfqq)) {
2857                if (__cfqq == cfqq)
2858                        return;
2859                new_cfqq = __cfqq;
2860        }
2861
2862        process_refs = cfqq_process_refs(cfqq);
2863        new_process_refs = cfqq_process_refs(new_cfqq);
2864        /*
2865         * If the process for the cfqq has gone away, there is no
2866         * sense in merging the queues.
2867         */
2868        if (process_refs == 0 || new_process_refs == 0)
2869                return;
2870
2871        /*
2872         * Merge in the direction of the lesser amount of work.
2873         */
2874        if (new_process_refs >= process_refs) {
2875                cfqq->new_cfqq = new_cfqq;
2876                new_cfqq->ref += process_refs;
2877        } else {
2878                new_cfqq->new_cfqq = cfqq;
2879                cfqq->ref += new_process_refs;
2880        }
2881}
2882
2883static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2884                        struct cfq_group *cfqg, enum wl_class_t wl_class)
2885{
2886        struct cfq_queue *queue;
2887        int i;
2888        bool key_valid = false;
2889        unsigned long lowest_key = 0;
2890        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2891
2892        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2893                /* select the one with lowest rb_key */
2894                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2895                if (queue &&
2896                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2897                        lowest_key = queue->rb_key;
2898                        cur_best = i;
2899                        key_valid = true;
2900                }
2901        }
2902
2903        return cur_best;
2904}
2905
2906static void
2907choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2908{
2909        unsigned slice;
2910        unsigned count;
2911        struct cfq_rb_root *st;
2912        unsigned group_slice;
2913        enum wl_class_t original_class = cfqd->serving_wl_class;
2914
2915        /* Choose next priority. RT > BE > IDLE */
2916        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2917                cfqd->serving_wl_class = RT_WORKLOAD;
2918        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2919                cfqd->serving_wl_class = BE_WORKLOAD;
2920        else {
2921                cfqd->serving_wl_class = IDLE_WORKLOAD;
2922                cfqd->workload_expires = jiffies + 1;
2923                return;
2924        }
2925
2926        if (original_class != cfqd->serving_wl_class)
2927                goto new_workload;
2928
2929        /*
2930         * For RT and BE, we have to choose also the type
2931         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2932         * expiration time
2933         */
2934        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2935        count = st->count;
2936
2937        /*
2938         * check workload expiration, and that we still have other queues ready
2939         */
2940        if (count && !time_after(jiffies, cfqd->workload_expires))
2941                return;
2942
2943new_workload:
2944        /* otherwise select new workload type */
2945        cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2946                                        cfqd->serving_wl_class);
2947        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2948        count = st->count;
2949
2950        /*
2951         * the workload slice is computed as a fraction of target latency
2952         * proportional to the number of queues in that workload, over
2953         * all the queues in the same priority class
2954         */
2955        group_slice = cfq_group_slice(cfqd, cfqg);
2956
2957        slice = group_slice * count /
2958                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2959                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2960                                        cfqg));
2961
2962        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2963                unsigned int tmp;
2964
2965                /*
2966                 * Async queues are currently system wide. Just taking
2967                 * proportion of queues with-in same group will lead to higher
2968                 * async ratio system wide as generally root group is going
2969                 * to have higher weight. A more accurate thing would be to
2970                 * calculate system wide asnc/sync ratio.
2971                 */
2972                tmp = cfqd->cfq_target_latency *
2973                        cfqg_busy_async_queues(cfqd, cfqg);
2974                tmp = tmp/cfqd->busy_queues;
2975                slice = min_t(unsigned, slice, tmp);
2976
2977                /* async workload slice is scaled down according to
2978                 * the sync/async slice ratio. */
2979                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2980        } else
2981                /* sync workload slice is at least 2 * cfq_slice_idle */
2982                slice = max(slice, 2 * cfqd->cfq_slice_idle);
2983
2984        slice = max_t(unsigned, slice, CFQ_MIN_TT);
2985        cfq_log(cfqd, "workload slice:%d", slice);
2986        cfqd->workload_expires = jiffies + slice;
2987}
2988
2989static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2990{
2991        struct cfq_rb_root *st = &cfqd->grp_service_tree;
2992        struct cfq_group *cfqg;
2993
2994        if (RB_EMPTY_ROOT(&st->rb))
2995                return NULL;
2996        cfqg = cfq_rb_first_group(st);
2997        update_min_vdisktime(st);
2998        return cfqg;
2999}
3000
3001static void cfq_choose_cfqg(struct cfq_data *cfqd)
3002{
3003        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3004
3005        cfqd->serving_group = cfqg;
3006
3007        /* Restore the workload type data */
3008        if (cfqg->saved_wl_slice) {
3009                cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3010                cfqd->serving_wl_type = cfqg->saved_wl_type;
3011                cfqd->serving_wl_class = cfqg->saved_wl_class;
3012        } else
3013                cfqd->workload_expires = jiffies - 1;
3014
3015        choose_wl_class_and_type(cfqd, cfqg);
3016}
3017
3018/*
3019 * Select a queue for service. If we have a current active queue,
3020 * check whether to continue servicing it, or retrieve and set a new one.
3021 */
3022static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3023{
3024        struct cfq_queue *cfqq, *new_cfqq = NULL;
3025
3026        cfqq = cfqd->active_queue;
3027        if (!cfqq)
3028                goto new_queue;
3029
3030        if (!cfqd->rq_queued)
3031                return NULL;
3032
3033        /*
3034         * We were waiting for group to get backlogged. Expire the queue
3035         */
3036        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3037                goto expire;
3038
3039        /*
3040         * The active queue has run out of time, expire it and select new.
3041         */
3042        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3043                /*
3044                 * If slice had not expired at the completion of last request
3045                 * we might not have turned on wait_busy flag. Don't expire
3046                 * the queue yet. Allow the group to get backlogged.
3047                 *
3048                 * The very fact that we have used the slice, that means we
3049                 * have been idling all along on this queue and it should be
3050                 * ok to wait for this request to complete.
3051                 */
3052                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3053                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3054                        cfqq = NULL;
3055                        goto keep_queue;
3056                } else
3057                        goto check_group_idle;
3058        }
3059
3060        /*
3061         * The active queue has requests and isn't expired, allow it to
3062         * dispatch.
3063         */
3064        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3065                goto keep_queue;
3066
3067        /*
3068         * If another queue has a request waiting within our mean seek
3069         * distance, let it run.  The expire code will check for close
3070         * cooperators and put the close queue at the front of the service
3071         * tree.  If possible, merge the expiring queue with the new cfqq.
3072         */
3073        new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3074        if (new_cfqq) {
3075                if (!cfqq->new_cfqq)
3076                        cfq_setup_merge(cfqq, new_cfqq);
3077                goto expire;
3078        }
3079
3080        /*
3081         * No requests pending. If the active queue still has requests in
3082         * flight or is idling for a new request, allow either of these
3083         * conditions to happen (or time out) before selecting a new queue.
3084         */
3085        if (timer_pending(&cfqd->idle_slice_timer)) {
3086                cfqq = NULL;
3087                goto keep_queue;
3088        }
3089
3090        /*
3091         * This is a deep seek queue, but the device is much faster than
3092         * the queue can deliver, don't idle
3093         **/
3094        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3095            (cfq_cfqq_slice_new(cfqq) ||
3096            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3097                cfq_clear_cfqq_deep(cfqq);
3098                cfq_clear_cfqq_idle_window(cfqq);
3099        }
3100
3101        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3102                cfqq = NULL;
3103                goto keep_queue;
3104        }
3105
3106        /*
3107         * If group idle is enabled and there are requests dispatched from
3108         * this group, wait for requests to complete.
3109         */
3110check_group_idle:
3111        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3112            cfqq->cfqg->dispatched &&
3113            !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3114                cfqq = NULL;
3115                goto keep_queue;
3116        }
3117
3118expire:
3119        cfq_slice_expired(cfqd, 0);
3120new_queue:
3121        /*
3122         * Current queue expired. Check if we have to switch to a new
3123         * service tree
3124         */
3125        if (!new_cfqq)
3126                cfq_choose_cfqg(cfqd);
3127
3128        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3129keep_queue:
3130        return cfqq;
3131}
3132
3133static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3134{
3135        int dispatched = 0;
3136
3137        while (cfqq->next_rq) {
3138                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3139                dispatched++;
3140        }
3141
3142        BUG_ON(!list_empty(&cfqq->fifo));
3143
3144        /* By default cfqq is not expired if it is empty. Do it explicitly */
3145        __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3146        return dispatched;
3147}
3148
3149/*
3150 * Drain our current requests. Used for barriers and when switching
3151 * io schedulers on-the-fly.
3152 */
3153static int cfq_forced_dispatch(struct cfq_data *cfqd)
3154{
3155        struct cfq_queue *cfqq;
3156        int dispatched = 0;
3157
3158        /* Expire the timeslice of the current active queue first */
3159        cfq_slice_expired(cfqd, 0);
3160        while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3161                __cfq_set_active_queue(cfqd, cfqq);
3162                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3163        }
3164
3165        BUG_ON(cfqd->busy_queues);
3166
3167        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3168        return dispatched;
3169}
3170
3171static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3172        struct cfq_queue *cfqq)
3173{
3174        /* the queue hasn't finished any request, can't estimate */
3175        if (cfq_cfqq_slice_new(cfqq))
3176                return true;
3177        if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3178                cfqq->slice_end))
3179                return true;
3180
3181        return false;
3182}
3183
3184static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3185{
3186        unsigned int max_dispatch;
3187
3188        if (cfq_cfqq_must_dispatch(cfqq))
3189                return true;
3190
3191        /*
3192         * Drain async requests before we start sync IO
3193         */
3194        if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3195                return false;
3196
3197        /*
3198         * If this is an async queue and we have sync IO in flight, let it wait
3199         */
3200        if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3201                return false;
3202
3203        max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3204        if (cfq_class_idle(cfqq))
3205                max_dispatch = 1;
3206
3207        /*
3208         * Does this cfqq already have too much IO in flight?
3209         */
3210        if (cfqq->dispatched >= max_dispatch) {
3211                bool promote_sync = false;
3212                /*
3213                 * idle queue must always only have a single IO in flight
3214                 */
3215                if (cfq_class_idle(cfqq))
3216                        return false;
3217
3218                /*
3219                 * If there is only one sync queue
3220                 * we can ignore async queue here and give the sync
3221                 * queue no dispatch limit. The reason is a sync queue can
3222                 * preempt async queue, limiting the sync queue doesn't make
3223                 * sense. This is useful for aiostress test.
3224                 */
3225                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3226                        promote_sync = true;
3227
3228                /*
3229                 * We have other queues, don't allow more IO from this one
3230                 */
3231                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3232                                !promote_sync)
3233                        return false;
3234
3235                /*
3236                 * Sole queue user, no limit
3237                 */
3238                if (cfqd->busy_queues == 1 || promote_sync)
3239                        max_dispatch = -1;
3240                else
3241                        /*
3242                         * Normally we start throttling cfqq when cfq_quantum/2
3243                         * requests have been dispatched. But we can drive
3244                         * deeper queue depths at the beginning of slice
3245                         * subjected to upper limit of cfq_quantum.
3246                         * */
3247                        max_dispatch = cfqd->cfq_quantum;
3248        }
3249
3250        /*
3251         * Async queues must wait a bit before being allowed dispatch.
3252         * We also ramp up the dispatch depth gradually for async IO,
3253         * based on the last sync IO we serviced
3254         */
3255        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3256                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3257                unsigned int depth;
3258
3259                depth = last_sync / cfqd->cfq_slice[1];
3260                if (!depth && !cfqq->dispatched)
3261                        depth = 1;
3262                if (depth < max_dispatch)
3263                        max_dispatch = depth;
3264        }
3265
3266        /*
3267         * If we're below the current max, allow a dispatch
3268         */
3269        return cfqq->dispatched < max_dispatch;
3270}
3271
3272/*
3273 * Dispatch a request from cfqq, moving them to the request queue
3274 * dispatch list.
3275 */
3276static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3277{
3278        struct request *rq;
3279
3280        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3281
3282        rq = cfq_check_fifo(cfqq);
3283        if (rq)
3284                cfq_mark_cfqq_must_dispatch(cfqq);
3285
3286        if (!cfq_may_dispatch(cfqd, cfqq))
3287                return false;
3288
3289        /*
3290         * follow expired path, else get first next available
3291         */
3292        if (!rq)
3293                rq = cfqq->next_rq;
3294        else
3295                cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
3296
3297        /*
3298         * insert request into driver dispatch list
3299         */
3300        cfq_dispatch_insert(cfqd->queue, rq);
3301
3302        if (!cfqd->active_cic) {
3303                struct cfq_io_cq *cic = RQ_CIC(rq);
3304
3305                atomic_long_inc(&cic->icq.ioc->refcount);
3306                cfqd->active_cic = cic;
3307        }
3308
3309        return true;
3310}
3311
3312/*
3313 * Find the cfqq that we need to service and move a request from that to the
3314 * dispatch list
3315 */
3316static int cfq_dispatch_requests(struct request_queue *q, int force)
3317{
3318        struct cfq_data *cfqd = q->elevator->elevator_data;
3319        struct cfq_queue *cfqq;
3320
3321        if (!cfqd->busy_queues)
3322                return 0;
3323
3324        if (unlikely(force))
3325                return cfq_forced_dispatch(cfqd);
3326
3327        cfqq = cfq_select_queue(cfqd);
3328        if (!cfqq)
3329                return 0;
3330
3331        /*
3332         * Dispatch a request from this cfqq, if it is allowed
3333         */
3334        if (!cfq_dispatch_request(cfqd, cfqq))
3335                return 0;
3336
3337        cfqq->slice_dispatch++;
3338        cfq_clear_cfqq_must_dispatch(cfqq);
3339
3340        /*
3341         * expire an async queue immediately if it has used up its slice. idle
3342         * queue always expire after 1 dispatch round.
3343         */
3344        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3345            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3346            cfq_class_idle(cfqq))) {
3347                cfqq->slice_end = jiffies + 1;
3348                cfq_slice_expired(cfqd, 0);
3349        }
3350
3351        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3352        return 1;
3353}
3354
3355/*
3356 * task holds one reference to the queue, dropped when task exits. each rq
3357 * in-flight on this queue also holds a reference, dropped when rq is freed.
3358 *
3359 * Each cfq queue took a reference on the parent group. Drop it now.
3360 * queue lock must be held here.
3361 */
3362static void cfq_put_queue(struct cfq_queue *cfqq)
3363{
3364        struct cfq_data *cfqd = cfqq->cfqd;
3365        struct cfq_group *cfqg;
3366
3367        BUG_ON(cfqq->ref <= 0);
3368
3369        cfqq->ref--;
3370        if (cfqq->ref)
3371                return;
3372
3373        cfq_log_cfqq(cfqd, cfqq, "put_queue");
3374        BUG_ON(rb_first(&cfqq->sort_list));
3375        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3376        cfqg = cfqq->cfqg;
3377
3378        if (unlikely(cfqd->active_queue == cfqq)) {
3379                __cfq_slice_expired(cfqd, cfqq, 0);
3380                cfq_schedule_dispatch(cfqd);
3381        }
3382
3383        BUG_ON(cfq_cfqq_on_rr(cfqq));
3384        kmem_cache_free(cfq_pool, cfqq);
3385        cfqg_put(cfqg);
3386}
3387
3388static void cfq_put_cooperator(struct cfq_queue *cfqq)
3389{
3390        struct cfq_queue *__cfqq, *next;
3391
3392        /*
3393         * If this queue was scheduled to merge with another queue, be
3394         * sure to drop the reference taken on that queue (and others in
3395         * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3396         */
3397        __cfqq = cfqq->new_cfqq;
3398        while (__cfqq) {
3399                if (__cfqq == cfqq) {
3400                        WARN(1, "cfqq->new_cfqq loop detected\n");
3401                        break;
3402                }
3403                next = __cfqq->new_cfqq;
3404                cfq_put_queue(__cfqq);
3405                __cfqq = next;
3406        }
3407}
3408
3409static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3410{
3411        if (unlikely(cfqq == cfqd->active_queue)) {
3412                __cfq_slice_expired(cfqd, cfqq, 0);
3413                cfq_schedule_dispatch(cfqd);
3414        }
3415
3416        cfq_put_cooperator(cfqq);
3417
3418        cfq_put_queue(cfqq);
3419}
3420
3421static void cfq_init_icq(struct io_cq *icq)
3422{
3423        struct cfq_io_cq *cic = icq_to_cic(icq);
3424
3425        cic->ttime.last_end_request = jiffies;
3426}
3427
3428static void cfq_exit_icq(struct io_cq *icq)
3429{
3430        struct cfq_io_cq *cic = icq_to_cic(icq);
3431        struct cfq_data *cfqd = cic_to_cfqd(cic);
3432
3433        if (cic->cfqq[BLK_RW_ASYNC]) {
3434                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3435                cic->cfqq[BLK_RW_ASYNC] = NULL;
3436        }
3437
3438        if (cic->cfqq[BLK_RW_SYNC]) {
3439                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3440                cic->cfqq[BLK_RW_SYNC] = NULL;
3441        }
3442}
3443
3444static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3445{
3446        struct task_struct *tsk = current;
3447        int ioprio_class;
3448
3449        if (!cfq_cfqq_prio_changed(cfqq))
3450                return;
3451
3452        ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3453        switch (ioprio_class) {
3454        default:
3455                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3456        case IOPRIO_CLASS_NONE:
3457                /*
3458                 * no prio set, inherit CPU scheduling settings
3459                 */
3460                cfqq->ioprio = task_nice_ioprio(tsk);
3461                cfqq->ioprio_class = task_nice_ioclass(tsk);
3462                break;
3463        case IOPRIO_CLASS_RT:
3464                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3465                cfqq->ioprio_class = IOPRIO_CLASS_RT;
3466                break;
3467        case IOPRIO_CLASS_BE:
3468                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3469                cfqq->ioprio_class = IOPRIO_CLASS_BE;
3470                break;
3471        case IOPRIO_CLASS_IDLE:
3472                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3473                cfqq->ioprio = 7;
3474                cfq_clear_cfqq_idle_window(cfqq);
3475                break;
3476        }
3477
3478        /*
3479         * keep track of original prio settings in case we have to temporarily
3480         * elevate the priority of this queue
3481         */
3482        cfqq->org_ioprio = cfqq->ioprio;
3483        cfq_clear_cfqq_prio_changed(cfqq);
3484}
3485
3486static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3487{
3488        int ioprio = cic->icq.ioc->ioprio;
3489        struct cfq_data *cfqd = cic_to_cfqd(cic);
3490        struct cfq_queue *cfqq;
3491
3492        /*
3493         * Check whether ioprio has changed.  The condition may trigger
3494         * spuriously on a newly created cic but there's no harm.
3495         */
3496        if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3497                return;
3498
3499        cfqq = cic->cfqq[BLK_RW_ASYNC];
3500        if (cfqq) {
3501                struct cfq_queue *new_cfqq;
3502                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3503                                         GFP_ATOMIC);
3504                if (new_cfqq) {
3505                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3506                        cfq_put_queue(cfqq);
3507                }
3508        }
3509
3510        cfqq = cic->cfqq[BLK_RW_SYNC];
3511        if (cfqq)
3512                cfq_mark_cfqq_prio_changed(cfqq);
3513
3514        cic->ioprio = ioprio;
3515}
3516
3517static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3518                          pid_t pid, bool is_sync)
3519{
3520        RB_CLEAR_NODE(&cfqq->rb_node);
3521        RB_CLEAR_NODE(&cfqq->p_node);
3522        INIT_LIST_HEAD(&cfqq->fifo);
3523
3524        cfqq->ref = 0;
3525        cfqq->cfqd = cfqd;
3526
3527        cfq_mark_cfqq_prio_changed(cfqq);
3528
3529        if (is_sync) {
3530                if (!cfq_class_idle(cfqq))
3531                        cfq_mark_cfqq_idle_window(cfqq);
3532                cfq_mark_cfqq_sync(cfqq);
3533        }
3534        cfqq->pid = pid;
3535}
3536
3537#ifdef CONFIG_CFQ_GROUP_IOSCHED
3538static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3539{
3540        struct cfq_data *cfqd = cic_to_cfqd(cic);
3541        struct cfq_queue *sync_cfqq;
3542        uint64_t id;
3543
3544        rcu_read_lock();
3545        id = bio_blkcg(bio)->id;
3546        rcu_read_unlock();
3547
3548        /*
3549         * Check whether blkcg has changed.  The condition may trigger
3550         * spuriously on a newly created cic but there's no harm.
3551         */
3552        if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3553                return;
3554
3555        sync_cfqq = cic_to_cfqq(cic, 1);
3556        if (sync_cfqq) {
3557                /*
3558                 * Drop reference to sync queue. A new sync queue will be
3559                 * assigned in new group upon arrival of a fresh request.
3560                 */
3561                cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3562                cic_set_cfqq(cic, NULL, 1);
3563                cfq_put_queue(sync_cfqq);
3564        }
3565
3566        cic->blkcg_id = id;
3567}
3568#else
3569static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3570#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3571
3572static struct cfq_queue *
3573cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3574                     struct bio *bio, gfp_t gfp_mask)
3575{
3576        struct blkcg *blkcg;
3577        struct cfq_queue *cfqq, *new_cfqq = NULL;
3578        struct cfq_group *cfqg;
3579
3580retry:
3581        rcu_read_lock();
3582
3583        blkcg = bio_blkcg(bio);
3584        cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3585        if (!cfqg) {
3586                cfqq = &cfqd->oom_cfqq;
3587                goto out;
3588        }
3589
3590        cfqq = cic_to_cfqq(cic, is_sync);
3591
3592        /*
3593         * Always try a new alloc if we fell back to the OOM cfqq
3594         * originally, since it should just be a temporary situation.
3595         */
3596        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3597                cfqq = NULL;
3598                if (new_cfqq) {
3599                        cfqq = new_cfqq;
3600                        new_cfqq = NULL;
3601                } else if (gfp_mask & __GFP_WAIT) {
3602                        rcu_read_unlock();
3603                        spin_unlock_irq(cfqd->queue->queue_lock);
3604                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
3605                                        gfp_mask | __GFP_ZERO,
3606                                        cfqd->queue->node);
3607                        spin_lock_irq(cfqd->queue->queue_lock);
3608                        if (new_cfqq)
3609                                goto retry;
3610                        else
3611                                return &cfqd->oom_cfqq;
3612                } else {
3613                        cfqq = kmem_cache_alloc_node(cfq_pool,
3614                                        gfp_mask | __GFP_ZERO,
3615                                        cfqd->queue->node);
3616                }
3617
3618                if (cfqq) {
3619                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3620                        cfq_init_prio_data(cfqq, cic);
3621                        cfq_link_cfqq_cfqg(cfqq, cfqg);
3622                        cfq_log_cfqq(cfqd, cfqq, "alloced");
3623                } else
3624                        cfqq = &cfqd->oom_cfqq;
3625        }
3626out:
3627        if (new_cfqq)
3628                kmem_cache_free(cfq_pool, new_cfqq);
3629
3630        rcu_read_unlock();
3631        return cfqq;
3632}
3633
3634static struct cfq_queue **
3635cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3636{
3637        switch (ioprio_class) {
3638        case IOPRIO_CLASS_RT:
3639                return &cfqd->async_cfqq[0][ioprio];
3640        case IOPRIO_CLASS_NONE:
3641                ioprio = IOPRIO_NORM;
3642                /* fall through */
3643        case IOPRIO_CLASS_BE:
3644                return &cfqd->async_cfqq[1][ioprio];
3645        case IOPRIO_CLASS_IDLE:
3646                return &cfqd->async_idle_cfqq;
3647        default:
3648                BUG();
3649        }
3650}
3651
3652static struct cfq_queue *
3653cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3654              struct bio *bio, gfp_t gfp_mask)
3655{
3656        int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3657        int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3658        struct cfq_queue **async_cfqq = NULL;
3659        struct cfq_queue *cfqq = NULL;
3660
3661        if (!is_sync) {
3662                if (!ioprio_valid(cic->ioprio)) {
3663                        struct task_struct *tsk = current;
3664                        ioprio = task_nice_ioprio(tsk);
3665                        ioprio_class = task_nice_ioclass(tsk);
3666                }
3667                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3668                cfqq = *async_cfqq;
3669        }
3670
3671        if (!cfqq)
3672                cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3673
3674        /*
3675         * pin the queue now that it's allocated, scheduler exit will prune it
3676         */
3677        if (!is_sync && !(*async_cfqq)) {
3678                cfqq->ref++;
3679                *async_cfqq = cfqq;
3680        }
3681
3682        cfqq->ref++;
3683        return cfqq;
3684}
3685
3686static void
3687__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3688{
3689        unsigned long elapsed = jiffies - ttime->last_end_request;
3690        elapsed = min(elapsed, 2UL * slice_idle);
3691
3692        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3693        ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3694        ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3695}
3696
3697static void
3698cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3699                        struct cfq_io_cq *cic)
3700{
3701        if (cfq_cfqq_sync(cfqq)) {
3702                __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3703                __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3704                        cfqd->cfq_slice_idle);
3705        }
3706#ifdef CONFIG_CFQ_GROUP_IOSCHED
3707        __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3708#endif
3709}
3710
3711static void
3712cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3713                       struct request *rq)
3714{
3715        sector_t sdist = 0;
3716        sector_t n_sec = blk_rq_sectors(rq);
3717        if (cfqq->last_request_pos) {
3718                if (cfqq->last_request_pos < blk_rq_pos(rq))
3719                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3720                else
3721                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3722        }
3723
3724        cfqq->seek_history <<= 1;
3725        if (blk_queue_nonrot(cfqd->queue))
3726                cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3727        else
3728                cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3729}
3730
3731/*
3732 * Disable idle window if the process thinks too long or seeks so much that
3733 * it doesn't matter
3734 */
3735static void
3736cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3737                       struct cfq_io_cq *cic)
3738{
3739        int old_idle, enable_idle;
3740
3741        /*
3742         * Don't idle for async or idle io prio class
3743         */
3744        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3745                return;
3746
3747        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3748
3749        if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3750                cfq_mark_cfqq_deep(cfqq);
3751
3752        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3753                enable_idle = 0;
3754        else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3755                 !cfqd->cfq_slice_idle ||
3756                 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3757                enable_idle = 0;
3758        else if (sample_valid(cic->ttime.ttime_samples)) {
3759                if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3760                        enable_idle = 0;
3761                else
3762                        enable_idle = 1;
3763        }
3764
3765        if (old_idle != enable_idle) {
3766                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3767                if (enable_idle)
3768                        cfq_mark_cfqq_idle_window(cfqq);
3769                else
3770                        cfq_clear_cfqq_idle_window(cfqq);
3771        }
3772}
3773
3774/*
3775 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3776 * no or if we aren't sure, a 1 will cause a preempt.
3777 */
3778static bool
3779cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3780                   struct request *rq)
3781{
3782        struct cfq_queue *cfqq;
3783
3784        cfqq = cfqd->active_queue;
3785        if (!cfqq)
3786                return false;
3787
3788        if (cfq_class_idle(new_cfqq))
3789                return false;
3790
3791        if (cfq_class_idle(cfqq))
3792                return true;
3793
3794        /*
3795         * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3796         */
3797        if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3798                return false;
3799
3800        /*
3801         * if the new request is sync, but the currently running queue is
3802         * not, let the sync request have priority.
3803         */
3804        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
3805                return true;
3806
3807        if (new_cfqq->cfqg != cfqq->cfqg)
3808                return false;
3809
3810        if (cfq_slice_used(cfqq))
3811                return true;
3812
3813        /* Allow preemption only if we are idling on sync-noidle tree */
3814        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3815            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3816            new_cfqq->service_tree->count == 2 &&
3817            RB_EMPTY_ROOT(&cfqq->sort_list))
3818                return true;
3819
3820        /*
3821         * So both queues are sync. Let the new request get disk time if
3822         * it's a metadata request and the current queue is doing regular IO.
3823         */
3824        if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3825                return true;
3826
3827        /*
3828         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3829         */
3830        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3831                return true;
3832
3833        /* An idle queue should not be idle now for some reason */
3834        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3835                return true;
3836
3837        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3838                return false;
3839
3840        /*
3841         * if this request is as-good as one we would expect from the
3842         * current cfqq, let it preempt
3843         */
3844        if (cfq_rq_close(cfqd, cfqq, rq))
3845                return true;
3846
3847        return false;
3848}
3849
3850/*
3851 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3852 * let it have half of its nominal slice.
3853 */
3854static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3855{
3856        enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3857
3858        cfq_log_cfqq(cfqd, cfqq, "preempt");
3859        cfq_slice_expired(cfqd, 1);
3860
3861        /*
3862         * workload type is changed, don't save slice, otherwise preempt
3863         * doesn't happen
3864         */
3865        if (old_type != cfqq_type(cfqq))
3866                cfqq->cfqg->saved_wl_slice = 0;
3867
3868        /*
3869         * Put the new queue at the front of the of the current list,
3870         * so we know that it will be selected next.
3871         */
3872        BUG_ON(!cfq_cfqq_on_rr(cfqq));
3873
3874        cfq_service_tree_add(cfqd, cfqq, 1);
3875
3876        cfqq->slice_end = 0;
3877        cfq_mark_cfqq_slice_new(cfqq);
3878}
3879
3880/*
3881 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3882 * something we should do about it
3883 */
3884static void
3885cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3886                struct request *rq)
3887{
3888        struct cfq_io_cq *cic = RQ_CIC(rq);
3889
3890        cfqd->rq_queued++;
3891        if (rq->cmd_flags & REQ_PRIO)
3892                cfqq->prio_pending++;
3893
3894        cfq_update_io_thinktime(cfqd, cfqq, cic);
3895        cfq_update_io_seektime(cfqd, cfqq, rq);
3896        cfq_update_idle_window(cfqd, cfqq, cic);
3897
3898        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3899
3900        if (cfqq == cfqd->active_queue) {
3901                /*
3902                 * Remember that we saw a request from this process, but
3903                 * don't start queuing just yet. Otherwise we risk seeing lots
3904                 * of tiny requests, because we disrupt the normal plugging
3905                 * and merging. If the request is already larger than a single
3906                 * page, let it rip immediately. For that case we assume that
3907                 * merging is already done. Ditto for a busy system that
3908                 * has other work pending, don't risk delaying until the
3909                 * idle timer unplug to continue working.
3910                 */
3911                if (cfq_cfqq_wait_request(cfqq)) {
3912                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3913                            cfqd->busy_queues > 1) {
3914                                cfq_del_timer(cfqd, cfqq);
3915                                cfq_clear_cfqq_wait_request(cfqq);
3916                                __blk_run_queue(cfqd->queue);
3917                        } else {
3918                                cfqg_stats_update_idle_time(cfqq->cfqg);
3919                                cfq_mark_cfqq_must_dispatch(cfqq);
3920                        }
3921                }
3922        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3923                /*
3924                 * not the active queue - expire current slice if it is
3925                 * idle and has expired it's mean thinktime or this new queue
3926                 * has some old slice time left and is of higher priority or
3927                 * this new queue is RT and the current one is BE
3928                 */
3929                cfq_preempt_queue(cfqd, cfqq);
3930                __blk_run_queue(cfqd->queue);
3931        }
3932}
3933
3934static void cfq_insert_request(struct request_queue *q, struct request *rq)
3935{
3936        struct cfq_data *cfqd = q->elevator->elevator_data;
3937        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3938
3939        cfq_log_cfqq(cfqd, cfqq, "insert_request");
3940        cfq_init_prio_data(cfqq, RQ_CIC(rq));
3941
3942        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3943        list_add_tail(&rq->queuelist, &cfqq->fifo);
3944        cfq_add_rq_rb(rq);
3945        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3946                                 rq->cmd_flags);
3947        cfq_rq_enqueued(cfqd, cfqq, rq);
3948}
3949
3950/*
3951 * Update hw_tag based on peak queue depth over 50 samples under
3952 * sufficient load.
3953 */
3954static void cfq_update_hw_tag(struct cfq_data *cfqd)
3955{
3956        struct cfq_queue *cfqq = cfqd->active_queue;
3957
3958        if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3959                cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3960
3961        if (cfqd->hw_tag == 1)
3962                return;
3963
3964        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3965            cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3966                return;
3967
3968        /*
3969         * If active queue hasn't enough requests and can idle, cfq might not
3970         * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3971         * case
3972         */
3973        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3974            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3975            CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3976                return;
3977
3978        if (cfqd->hw_tag_samples++ < 50)
3979                return;
3980
3981        if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3982                cfqd->hw_tag = 1;
3983        else
3984                cfqd->hw_tag = 0;
3985}
3986
3987static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3988{
3989        struct cfq_io_cq *cic = cfqd->active_cic;
3990
3991        /* If the queue already has requests, don't wait */
3992        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3993                return false;
3994
3995        /* If there are other queues in the group, don't wait */
3996        if (cfqq->cfqg->nr_cfqq > 1)
3997                return false;
3998
3999        /* the only queue in the group, but think time is big */
4000        if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4001                return false;
4002
4003        if (cfq_slice_used(cfqq))
4004                return true;
4005
4006        /* if slice left is less than think time, wait busy */
4007        if (cic && sample_valid(cic->ttime.ttime_samples)
4008            && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
4009                return true;
4010
4011        /*
4012         * If think times is less than a jiffy than ttime_mean=0 and above
4013         * will not be true. It might happen that slice has not expired yet
4014         * but will expire soon (4-5 ns) during select_queue(). To cover the
4015         * case where think time is less than a jiffy, mark the queue wait
4016         * busy if only 1 jiffy is left in the slice.
4017         */
4018        if (cfqq->slice_end - jiffies == 1)
4019                return true;
4020
4021        return false;
4022}
4023
4024static void cfq_completed_request(struct request_queue *q, struct request *rq)
4025{
4026        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4027        struct cfq_data *cfqd = cfqq->cfqd;
4028        const int sync = rq_is_sync(rq);
4029        unsigned long now;
4030
4031        now = jiffies;
4032        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4033                     !!(rq->cmd_flags & REQ_NOIDLE));
4034
4035        cfq_update_hw_tag(cfqd);
4036
4037        WARN_ON(!cfqd->rq_in_driver);
4038        WARN_ON(!cfqq->dispatched);
4039        cfqd->rq_in_driver--;
4040        cfqq->dispatched--;
4041        (RQ_CFQG(rq))->dispatched--;
4042        cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4043                                     rq_io_start_time_ns(rq), rq->cmd_flags);
4044
4045        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4046
4047        if (sync) {
4048                struct cfq_rb_root *st;
4049
4050                RQ_CIC(rq)->ttime.last_end_request = now;
4051
4052                if (cfq_cfqq_on_rr(cfqq))
4053                        st = cfqq->service_tree;
4054                else
4055                        st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4056                                        cfqq_type(cfqq));
4057
4058                st->ttime.last_end_request = now;
4059                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4060                        cfqd->last_delayed_sync = now;
4061        }
4062
4063#ifdef CONFIG_CFQ_GROUP_IOSCHED
4064        cfqq->cfqg->ttime.last_end_request = now;
4065#endif
4066
4067        /*
4068         * If this is the active queue, check if it needs to be expired,
4069         * or if we want to idle in case it has no pending requests.
4070         */
4071        if (cfqd->active_queue == cfqq) {
4072                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4073
4074                if (cfq_cfqq_slice_new(cfqq)) {
4075                        cfq_set_prio_slice(cfqd, cfqq);
4076                        cfq_clear_cfqq_slice_new(cfqq);
4077                }
4078
4079                /*
4080                 * Should we wait for next request to come in before we expire
4081                 * the queue.
4082                 */
4083                if (cfq_should_wait_busy(cfqd, cfqq)) {
4084                        unsigned long extend_sl = cfqd->cfq_slice_idle;
4085                        if (!cfqd->cfq_slice_idle)
4086                                extend_sl = cfqd->cfq_group_idle;
4087                        cfqq->slice_end = jiffies + extend_sl;
4088                        cfq_mark_cfqq_wait_busy(cfqq);
4089                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4090                }
4091
4092                /*
4093                 * Idling is not enabled on:
4094                 * - expired queues
4095                 * - idle-priority queues
4096                 * - async queues
4097                 * - queues with still some requests queued
4098                 * - when there is a close cooperator
4099                 */
4100                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4101                        cfq_slice_expired(cfqd, 1);
4102                else if (sync && cfqq_empty &&
4103                         !cfq_close_cooperator(cfqd, cfqq)) {
4104                        cfq_arm_slice_timer(cfqd);
4105                }
4106        }
4107
4108        if (!cfqd->rq_in_driver)
4109                cfq_schedule_dispatch(cfqd);
4110}
4111
4112static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4113{
4114        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4115                cfq_mark_cfqq_must_alloc_slice(cfqq);
4116                return ELV_MQUEUE_MUST;
4117        }
4118
4119        return ELV_MQUEUE_MAY;
4120}
4121
4122static int cfq_may_queue(struct request_queue *q, int rw)
4123{
4124        struct cfq_data *cfqd = q->elevator->elevator_data;
4125        struct task_struct *tsk = current;
4126        struct cfq_io_cq *cic;
4127        struct cfq_queue *cfqq;
4128
4129        /*
4130         * don't force setup of a queue from here, as a call to may_queue
4131         * does not necessarily imply that a request actually will be queued.
4132         * so just lookup a possibly existing queue, or return 'may queue'
4133         * if that fails
4134         */
4135        cic = cfq_cic_lookup(cfqd, tsk->io_context);
4136        if (!cic)
4137                return ELV_MQUEUE_MAY;
4138
4139        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4140        if (cfqq) {
4141                cfq_init_prio_data(cfqq, cic);
4142
4143                return __cfq_may_queue(cfqq);
4144        }
4145
4146        return ELV_MQUEUE_MAY;
4147}
4148
4149/*
4150 * queue lock held here
4151 */
4152static void cfq_put_request(struct request *rq)
4153{
4154        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4155
4156        if (cfqq) {
4157                const int rw = rq_data_dir(rq);
4158
4159                BUG_ON(!cfqq->allocated[rw]);
4160                cfqq->allocated[rw]--;
4161
4162                /* Put down rq reference on cfqg */
4163                cfqg_put(RQ_CFQG(rq));
4164                rq->elv.priv[0] = NULL;
4165                rq->elv.priv[1] = NULL;
4166
4167                cfq_put_queue(cfqq);
4168        }
4169}
4170
4171static struct cfq_queue *
4172cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4173                struct cfq_queue *cfqq)
4174{
4175        cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4176        cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4177        cfq_mark_cfqq_coop(cfqq->new_cfqq);
4178        cfq_put_queue(cfqq);
4179        return cic_to_cfqq(cic, 1);
4180}
4181
4182/*
4183 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4184 * was the last process referring to said cfqq.
4185 */
4186static struct cfq_queue *
4187split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4188{
4189        if (cfqq_process_refs(cfqq) == 1) {
4190                cfqq->pid = current->pid;
4191                cfq_clear_cfqq_coop(cfqq);
4192                cfq_clear_cfqq_split_coop(cfqq);
4193                return cfqq;
4194        }
4195
4196        cic_set_cfqq(cic, NULL, 1);
4197
4198        cfq_put_cooperator(cfqq);
4199
4200        cfq_put_queue(cfqq);
4201        return NULL;
4202}
4203/*
4204 * Allocate cfq data structures associated with this request.
4205 */
4206static int
4207cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4208                gfp_t gfp_mask)
4209{
4210        struct cfq_data *cfqd = q->elevator->elevator_data;
4211        struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4212        const int rw = rq_data_dir(rq);
4213        const bool is_sync = rq_is_sync(rq);
4214        struct cfq_queue *cfqq;
4215
4216        might_sleep_if(gfp_mask & __GFP_WAIT);
4217
4218        spin_lock_irq(q->queue_lock);
4219
4220        check_ioprio_changed(cic, bio);
4221        check_blkcg_changed(cic, bio);
4222new_queue:
4223        cfqq = cic_to_cfqq(cic, is_sync);
4224        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4225                cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
4226                cic_set_cfqq(cic, cfqq, is_sync);
4227        } else {
4228                /*
4229                 * If the queue was seeky for too long, break it apart.
4230                 */
4231                if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4232                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4233                        cfqq = split_cfqq(cic, cfqq);
4234                        if (!cfqq)
4235                                goto new_queue;
4236                }
4237
4238                /*
4239                 * Check to see if this queue is scheduled to merge with
4240                 * another, closely cooperating queue.  The merging of
4241                 * queues happens here as it must be done in process context.
4242                 * The reference on new_cfqq was taken in merge_cfqqs.
4243                 */
4244                if (cfqq->new_cfqq)
4245                        cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4246        }
4247
4248        cfqq->allocated[rw]++;
4249
4250        cfqq->ref++;
4251        cfqg_get(cfqq->cfqg);
4252        rq->elv.priv[0] = cfqq;
4253        rq->elv.priv[1] = cfqq->cfqg;
4254        spin_unlock_irq(q->queue_lock);
4255        return 0;
4256}
4257
4258static void cfq_kick_queue(struct work_struct *work)
4259{
4260        struct cfq_data *cfqd =
4261                container_of(work, struct cfq_data, unplug_work);
4262        struct request_queue *q = cfqd->queue;
4263
4264        spin_lock_irq(q->queue_lock);
4265        __blk_run_queue(cfqd->queue);
4266        spin_unlock_irq(q->queue_lock);
4267}
4268
4269/*
4270 * Timer running if the active_queue is currently idling inside its time slice
4271 */
4272static void cfq_idle_slice_timer(unsigned long data)
4273{
4274        struct cfq_data *cfqd = (struct cfq_data *) data;
4275        struct cfq_queue *cfqq;
4276        unsigned long flags;
4277        int timed_out = 1;
4278
4279        cfq_log(cfqd, "idle timer fired");
4280
4281        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4282
4283        cfqq = cfqd->active_queue;
4284        if (cfqq) {
4285                timed_out = 0;
4286
4287                /*
4288                 * We saw a request before the queue expired, let it through
4289                 */
4290                if (cfq_cfqq_must_dispatch(cfqq))
4291                        goto out_kick;
4292
4293                /*
4294                 * expired
4295                 */
4296                if (cfq_slice_used(cfqq))
4297                        goto expire;
4298
4299                /*
4300                 * only expire and reinvoke request handler, if there are
4301                 * other queues with pending requests
4302                 */
4303                if (!cfqd->busy_queues)
4304                        goto out_cont;
4305
4306                /*
4307                 * not expired and it has a request pending, let it dispatch
4308                 */
4309                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4310                        goto out_kick;
4311
4312                /*
4313                 * Queue depth flag is reset only when the idle didn't succeed
4314                 */
4315                cfq_clear_cfqq_deep(cfqq);
4316        }
4317expire:
4318        cfq_slice_expired(cfqd, timed_out);
4319out_kick:
4320        cfq_schedule_dispatch(cfqd);
4321out_cont:
4322        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4323}
4324
4325static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4326{
4327        del_timer_sync(&cfqd->idle_slice_timer);
4328        cancel_work_sync(&cfqd->unplug_work);
4329}
4330
4331static void cfq_put_async_queues(struct cfq_data *cfqd)
4332{
4333        int i;
4334
4335        for (i = 0; i < IOPRIO_BE_NR; i++) {
4336                if (cfqd->async_cfqq[0][i])
4337                        cfq_put_queue(cfqd->async_cfqq[0][i]);
4338                if (cfqd->async_cfqq[1][i])
4339                        cfq_put_queue(cfqd->async_cfqq[1][i]);
4340        }
4341
4342        if (cfqd->async_idle_cfqq)
4343                cfq_put_queue(cfqd->async_idle_cfqq);
4344}
4345
4346static void cfq_exit_queue(struct elevator_queue *e)
4347{
4348        struct cfq_data *cfqd = e->elevator_data;
4349        struct request_queue *q = cfqd->queue;
4350
4351        cfq_shutdown_timer_wq(cfqd);
4352
4353        spin_lock_irq(q->queue_lock);
4354
4355        if (cfqd->active_queue)
4356                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4357
4358        cfq_put_async_queues(cfqd);
4359
4360        spin_unlock_irq(q->queue_lock);
4361
4362        cfq_shutdown_timer_wq(cfqd);
4363
4364#ifdef CONFIG_CFQ_GROUP_IOSCHED
4365        blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4366#else
4367        kfree(cfqd->root_group);
4368#endif
4369        kfree(cfqd);
4370}
4371
4372static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4373{
4374        struct cfq_data *cfqd;
4375        struct blkcg_gq *blkg __maybe_unused;
4376        int i, ret;
4377        struct elevator_queue *eq;
4378
4379        eq = elevator_alloc(q, e);
4380        if (!eq)
4381                return -ENOMEM;
4382
4383        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
4384        if (!cfqd) {
4385                kobject_put(&eq->kobj);
4386                return -ENOMEM;
4387        }
4388        eq->elevator_data = cfqd;
4389
4390        cfqd->queue = q;
4391        spin_lock_irq(q->queue_lock);
4392        q->elevator = eq;
4393        spin_unlock_irq(q->queue_lock);
4394
4395        /* Init root service tree */
4396        cfqd->grp_service_tree = CFQ_RB_ROOT;
4397
4398        /* Init root group and prefer root group over other groups by default */
4399#ifdef CONFIG_CFQ_GROUP_IOSCHED
4400        ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4401        if (ret)
4402                goto out_free;
4403
4404        cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4405#else
4406        ret = -ENOMEM;
4407        cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4408                                        GFP_KERNEL, cfqd->queue->node);
4409        if (!cfqd->root_group)
4410                goto out_free;
4411
4412        cfq_init_cfqg_base(cfqd->root_group);
4413#endif
4414        cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4415        cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4416
4417        /*
4418         * Not strictly needed (since RB_ROOT just clears the node and we
4419         * zeroed cfqd on alloc), but better be safe in case someone decides
4420         * to add magic to the rb code
4421         */
4422        for (i = 0; i < CFQ_PRIO_LISTS; i++)
4423                cfqd->prio_trees[i] = RB_ROOT;
4424
4425        /*
4426         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4427         * Grab a permanent reference to it, so that the normal code flow
4428         * will not attempt to free it.  oom_cfqq is linked to root_group
4429         * but shouldn't hold a reference as it'll never be unlinked.  Lose
4430         * the reference from linking right away.
4431         */
4432        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4433        cfqd->oom_cfqq.ref++;
4434
4435        spin_lock_irq(q->queue_lock);
4436        cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4437        cfqg_put(cfqd->root_group);
4438        spin_unlock_irq(q->queue_lock);
4439
4440        init_timer(&cfqd->idle_slice_timer);
4441        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4442        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4443
4444        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4445
4446        cfqd->cfq_quantum = cfq_quantum;
4447        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4448        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4449        cfqd->cfq_back_max = cfq_back_max;
4450        cfqd->cfq_back_penalty = cfq_back_penalty;
4451        cfqd->cfq_slice[0] = cfq_slice_async;
4452        cfqd->cfq_slice[1] = cfq_slice_sync;
4453        cfqd->cfq_target_latency = cfq_target_latency;
4454        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4455        cfqd->cfq_slice_idle = cfq_slice_idle;
4456        cfqd->cfq_group_idle = cfq_group_idle;
4457        cfqd->cfq_latency = 1;
4458        cfqd->hw_tag = -1;
4459        /*
4460         * we optimistically start assuming sync ops weren't delayed in last
4461         * second, in order to have larger depth for async operations.
4462         */
4463        cfqd->last_delayed_sync = jiffies - HZ;
4464        return 0;
4465
4466out_free:
4467        kfree(cfqd);
4468        kobject_put(&eq->kobj);
4469        return ret;
4470}
4471
4472/*
4473 * sysfs parts below -->
4474 */
4475static ssize_t
4476cfq_var_show(unsigned int var, char *page)
4477{
4478        return sprintf(page, "%d\n", var);
4479}
4480
4481static ssize_t
4482cfq_var_store(unsigned int *var, const char *page, size_t count)
4483{
4484        char *p = (char *) page;
4485
4486        *var = simple_strtoul(p, &p, 10);
4487        return count;
4488}
4489
4490#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4491static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4492{                                                                       \
4493        struct cfq_data *cfqd = e->elevator_data;                       \
4494        unsigned int __data = __VAR;                                    \
4495        if (__CONV)                                                     \
4496                __data = jiffies_to_msecs(__data);                      \
4497        return cfq_var_show(__data, (page));                            \
4498}
4499SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4500SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4501SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4502SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4503SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4504SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4505SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4506SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4507SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4508SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4509SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4510SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4511#undef SHOW_FUNCTION
4512
4513#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4514static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4515{                                                                       \
4516        struct cfq_data *cfqd = e->elevator_data;                       \
4517        unsigned int __data;                                            \
4518        int ret = cfq_var_store(&__data, (page), count);                \
4519        if (__data < (MIN))                                             \
4520                __data = (MIN);                                         \
4521        else if (__data > (MAX))                                        \
4522                __data = (MAX);                                         \
4523        if (__CONV)                                                     \
4524                *(__PTR) = msecs_to_jiffies(__data);                    \
4525        else                                                            \
4526                *(__PTR) = __data;                                      \
4527        return ret;                                                     \
4528}
4529STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4530STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4531                UINT_MAX, 1);
4532STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4533                UINT_MAX, 1);
4534STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4535STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4536                UINT_MAX, 0);
4537STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4538STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4539STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4540STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4541STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4542                UINT_MAX, 0);
4543STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4544STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4545#undef STORE_FUNCTION
4546
4547#define CFQ_ATTR(name) \
4548        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4549
4550static struct elv_fs_entry cfq_attrs[] = {
4551        CFQ_ATTR(quantum),
4552        CFQ_ATTR(fifo_expire_sync),
4553        CFQ_ATTR(fifo_expire_async),
4554