linux/block/cfq-iosched.c
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY          (HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT              (2)

#define CFQ_SLICE_SCALE         (5)
#define CFQ_HW_QUEUE_MIN        (5)
#define CFQ_SERVICE_SHIFT       12

#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
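
/*
 * Worked example for CFQQ_SEEKY(): seek_history is a 32-bit sliding window
 * with one bit per recent request (1 == seeky).  A queue counts as seeky
 * once more than 32/8 == 4 of its last 32 requests were seeks, e.g.
 * hweight32(0x0000001f) == 5 > 4, so that queue would be flagged seeky.
 */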

#define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)   ((samples) > 80)
#define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
        unsigned long last_end_request;

        unsigned long ttime_total;
        unsigned long ttime_samples;
        unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
        u64 min_vdisktime;
        struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
                        .ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        int ref;
        /* various state flags, see below */
        unsigned int flags;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* prio tree member */
        struct rb_node p_node;
        /* prio tree root we belong to, if any */
        struct rb_root *p_root;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
        unsigned int allocated_slice;
        unsigned int slice_dispatch;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
        long slice_resid;

        /* pending priority requests */
        int prio_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class;

        pid_t pid;

        u32 seek_history;
        sector_t last_request_pos;

        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        /* Number of sectors dispatched from queue in single dispatch round */
        unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately; its queues go on the dedicated
 * service_tree_idle rather than into the service_trees array.
 */
enum wl_class_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
        CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
        ASYNC_WORKLOAD = 0,
        SYNC_NOIDLE_WORKLOAD = 1,
        SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        /* total bytes transferred */
        struct blkg_rwstat              service_bytes;
        /* total IOs serviced, post merge */
        struct blkg_rwstat              serviced;
        /* number of ios merged */
        struct blkg_rwstat              merged;
        /* total time spent on device in ns, may not be accurate w/ queueing */
        struct blkg_rwstat              service_time;
        /* total time spent waiting in scheduler queue in ns */
        struct blkg_rwstat              wait_time;
        /* number of IOs queued up */
        struct blkg_rwstat              queued;
        /* total sectors transferred */
        struct blkg_stat                sectors;
        /* total disk time and nr sectors dispatched by this group */
        struct blkg_stat                time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* time not charged to this cgroup */
        struct blkg_stat                unaccounted_time;
        /* sum of number of ios queued across all samples */
        struct blkg_stat                avg_queue_size_sum;
        /* count of samples taken for average */
        struct blkg_stat                avg_queue_size_samples;
        /* how many times this group has been removed from service tree */
        struct blkg_stat                dequeue;
        /* total time spent waiting for it to be assigned a timeslice. */
        struct blkg_stat                group_wait_time;
        /* time spent idling for this blkcg_gq */
        struct blkg_stat                idle_time;
        /* total time with empty current active q with other requests queued */
        struct blkg_stat                empty_time;
        /* fields after this shouldn't be cleared on stat reset */
        uint64_t                        start_group_wait_time;
        uint64_t                        start_idle_time;
        uint64_t                        start_empty_time;
        uint16_t                        flags;
#endif  /* CONFIG_DEBUG_BLK_CGROUP */
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* group service_tree member */
        struct rb_node rb_node;

        /* group service_tree key */
        u64 vdisktime;

        /*
         * The number of active cfqgs and sum of their weights under this
         * cfqg.  This covers this cfqg's leaf_weight and all children's
         * weights, but does not cover weights of further descendants.
         *
         * If a cfqg is on the service tree, it's active.  An active cfqg
         * also activates its parent and contributes to the children_weight
         * of the parent.
         */
        int nr_active;
        unsigned int children_weight;

        /*
         * vfraction is the fraction of vdisktime that the tasks in this
         * cfqg are entitled to.  This is determined by compounding the
         * ratios walking up from this cfqg to the root.
         *
         * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
         * vfractions on a service tree is approximately 1.  The sum may
         * deviate a bit due to rounding errors and fluctuations caused by
         * cfqgs entering and leaving the service tree.
         */
        unsigned int vfraction;

        /*
         * There are two weights - (internal) weight is the weight of this
         * cfqg against the sibling cfqgs.  leaf_weight is the weight of
         * this cfqg against the child cfqgs.  For the root cfqg, both
         * weights are kept in sync for backward compatibility.
         */
        unsigned int weight;
        unsigned int new_weight;
        unsigned int dev_weight;

        unsigned int leaf_weight;
        unsigned int new_leaf_weight;
        unsigned int dev_leaf_weight;

        /* number of cfqq currently on this group */
        int nr_cfqq;

        /*
         * Per group busy queues average. Useful for workload slice calc. We
         * create the array for each prio class but at run time it is used
         * only for RT and BE class and slot for IDLE class remains unused.
         * This is primarily done to avoid confusion and a gcc warning.
         */
        unsigned int busy_queues_avg[CFQ_PRIO_NR];
        /*
         * rr lists of queues with requests. We maintain service trees for
         * RT and BE classes. These trees are subdivided in subclasses
         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
         * class there is no subclassification and all the cfq queues go on
         * a single tree service_tree_idle.
         * Counts are embedded in the cfq_rb_root
         */
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;

        unsigned long saved_wl_slice;
        enum wl_type_t saved_wl_type;
        enum wl_class_t saved_wl_class;

        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
        struct cfq_ttime ttime;
        struct cfqg_stats stats;        /* stats for this cfqg */
        struct cfqg_stats dead_stats;   /* stats pushed from dead children */
};

struct cfq_io_cq {
        struct io_cq            icq;            /* must be the first member */
        struct cfq_queue        *cfqq[2];
        struct cfq_ttime        ttime;
        int                     ioprio;         /* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        uint64_t                blkcg_id;       /* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
        struct request_queue *queue;
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group *root_group;

        /*
         * The priority currently being served
         */
        enum wl_class_t serving_wl_class;
        enum wl_type_t serving_wl_type;
        unsigned long workload_expires;
        struct cfq_group *serving_group;

        /*
         * Each priority tree is sorted by next_request position.  These
         * trees are used when determining if two or more queues are
         * interleaving requests (see cfq_close_cooperator).
         */
        struct rb_root prio_trees[CFQ_PRIO_LISTS];

        unsigned int busy_queues;
        unsigned int busy_sync_queues;

        int rq_in_driver;
        int rq_in_flight[2];

        /*
         * queue-depth detection
         */
        int rq_queued;
        int hw_tag;
        /*
         * hw_tag can be
         * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
         *  0 => no NCQ
         */
        int hw_tag_est_depth;
        unsigned int hw_tag_samples;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_cq *active_cic;

        /*
         * async queue for each priority case
         */
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;

        sector_t last_position;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_target_latency;

        /*
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;

        unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
                                            enum wl_class_t class,
                                            enum wl_type_t type)
{
        if (!cfqg)
                return NULL;

        if (class == IDLE_WORKLOAD)
                return &cfqg->service_tree_idle;

        return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)                                              \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
{                                                                       \
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
}                                                                       \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
{                                                                       \
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
}                                                                       \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
{                                                                       \
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
}

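/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) below expands to three helpers
 * used throughout this file:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	sets CFQ_CFQQ_FLAG_on_rr in cfqq->flags
 *	cfq_clear_cfqq_on_rr(cfqq);	clears it
 *	cfq_cfqq_on_rr(cfqq);		tests it, returning 0 or 1
 */
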
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
        return pd_to_blkg(&cfqg->pd);
}

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
        CFQG_stats_waiting = 0,
        CFQG_stats_idling,
        CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)                                             \
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
{                                                                       \
        stats->flags |= (1 << CFQG_stats_##name);                       \
}                                                                       \
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
{                                                                       \
        stats->flags &= ~(1 << CFQG_stats_##name);                      \
}                                                                       \
static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
{                                                                       \
        return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
}                                                                       \

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
                                                 struct cfq_group *curr_cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_waiting(stats))
                return;
        if (cfqg == curr_cfqg)
                return;
        stats->start_group_wait_time = sched_clock();
        cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
        blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (cfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                cfqg_stats_clear_idling(stats);
        }
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        BUG_ON(cfqg_stats_idling(stats));

        stats->start_idle_time = sched_clock();
        cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        cfqg_stats_update_group_wait_time(stats);
}

#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
        struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
        return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
        return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                          __pbuf, ##args);                              \
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
        blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                                            struct cfq_group *curr_cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
        cfqg_stats_end_empty_time(&cfqg->stats);
        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time)
{
        blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
                                              uint64_t bytes, int rw)
{
        blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
        blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
        blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw)
{
        struct cfqg_stats *stats = &cfqg->stats;
        unsigned long long now = sched_clock();

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, rw,
                                io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->service_bytes);
        blkg_rwstat_reset(&stats->serviced);
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_reset(&stats->unaccounted_time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
        /* queued stats shouldn't be cleared */
        blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
        blkg_rwstat_merge(&to->serviced, &from->serviced);
        blkg_rwstat_merge(&to->merged, &from->merged);
        blkg_rwstat_merge(&to->service_time, &from->service_time);
        blkg_rwstat_merge(&to->wait_time, &from->wait_time);
        blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
        blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
        blkg_stat_merge(&to->dequeue, &from->dequeue);
        blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_merge(&to->idle_time, &from->idle_time);
        blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
        struct cfq_group *parent = cfqg_parent(cfqg);

        lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
        cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
        cfqg_stats_reset(&cfqg->stats);
        cfqg_stats_reset(&cfqg->dead_stats);
}

#else   /* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                                ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                        struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
                                              uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)     \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
        for (i = 0; i <= IDLE_WORKLOAD; i++) \
                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
                        : &cfqg->service_tree_idle; \
                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
                        (i == IDLE_WORKLOAD && j == 0); \
                        j++, st = i < IDLE_WORKLOAD ? \
                        &cfqg->service_trees[i][j]: NULL) \

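/*
 * Illustration: for a group with all classes populated, the macro above
 * visits seven trees in this order:
 *
 *	service_trees[BE_WORKLOAD][ASYNC..SYNC]		(i == 0, j == 0..2)
 *	service_trees[RT_WORKLOAD][ASYNC..SYNC]		(i == 1, j == 0..2)
 *	service_tree_idle				(i == 2, j == 0)
 */
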
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
        struct cfq_ttime *ttime, bool group_idle)
{
        unsigned long slice;
        if (!sample_valid(ttime->ttime_samples))
                return false;
        if (group_idle)
                slice = cfqd->cfq_group_idle;
        else
                slice = cfqd->cfq_slice_idle;
        return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
        /*
         * If we are not idling on queues and the drive supports NCQ,
         * requests execute in parallel and measuring slice time is not
         * meaningful unless we drive shallower queue depths, which would
         * itself become a performance bottleneck. In such cases switch
         * to providing fairness in terms of number of IOs.
         */
        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
                return true;
        else
                return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
        if (cfq_class_idle(cfqq))
                return IDLE_WORKLOAD;
        if (cfq_class_rt(cfqq))
                return RT_WORKLOAD;
        return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
        if (!cfq_cfqq_sync(cfqq))
                return ASYNC_WORKLOAD;
        if (!cfq_cfqq_idle_window(cfqq))
                return SYNC_NOIDLE_WORKLOAD;
        return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
                                        struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        if (wl_class == IDLE_WORKLOAD)
                return cfqg->service_tree_idle.count;

        return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
                cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
                cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
                cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
                                       struct cfq_io_cq *cic, struct bio *bio,
                                       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
        /* cic->icq is the first member, %NULL will convert to %NULL */
        return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
                                               struct io_context *ioc)
{
        if (ioc)
                return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
        return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
        return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
                                bool is_sync)
{
        cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
        return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * Schedule a run of the queue if there are requests pending and no one
 * in the driver will restart queueing.
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

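/*
 * Worked example (assuming HZ == 1000, so cfq_slice_sync == 100 jiffies):
 * base_slice / CFQ_SLICE_SCALE == 20, so each ioprio step is worth 20
 * jiffies around the prio-4 midpoint:
 *
 *	prio 0 -> 100 + 20 * 4 == 180	(highest best-effort priority)
 *	prio 4 -> 100 + 20 * 0 == 100
 *	prio 7 -> 100 + 20 * -3 ==  40	(lowest)
 */
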
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
                                    unsigned int vfraction)
{
        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */

        /* charge / vfraction */
        c <<= CFQ_SERVICE_SHIFT;
        do_div(c, vfraction);
        return c;
}

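/*
 * Worked example: with vfraction == 1 << CFQ_SERVICE_SHIFT (a group owning
 * the whole tree), c == charge << 24 divided by 1 << 12 yields
 * charge << 12, i.e. the charge unchanged in fixed point.  A group owning
 * half the tree (vfraction == 1 << 11) is charged twice as much vdisktime
 * for the same slice, which is what keeps vdisktime advancement fair.
 */
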
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta > 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta < 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}

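/*
 * The signed-delta comparisons above are wraparound-safe: even if
 * vdisktime overflows u64 and wraps past min_vdisktime,
 * (s64)(vdisktime - min_vdisktime) still has the right sign as long as
 * the two values are within 2^63 of each other, the same trick
 * time_after() uses for jiffies.
 */
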
static void update_min_vdisktime(struct cfq_rb_root *st)
{
        struct cfq_group *cfqg;

        if (st->left) {
                cfqg = rb_entry_cfqg(st->left);
                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
                                                  cfqg->vdisktime);
        }
}

/*
 * Get the averaged number of queues of RT/BE priority.  The average is
 * updated with a formula that gives more weight to higher numbers, so it
 * quickly follows sudden increases and decays slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg, bool rt)
{
        unsigned min_q, max_q;
        unsigned mult  = cfq_hist_divisor - 1;
        unsigned round = cfq_hist_divisor / 2;
        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

        min_q = min(cfqg->busy_queues_avg[rt], busy);
        max_q = max(cfqg->busy_queues_avg[rt], busy);
        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
                cfq_hist_divisor;
        return cfqg->busy_queues_avg[rt];
}

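/*
 * Worked example with cfq_hist_divisor == 4 (mult == 3, round == 2):
 * a jump from an average of 2 busy queues to 10 gives
 * (3 * 10 + 2 + 2) / 4 == 8, nearly the new value in one step, while a
 * drop from 8 to 0 gives (3 * 8 + 0 + 2) / 4 == 6, decaying slowly -
 * exactly the asymmetry the comment above describes.
 */
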
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
                /*
                 * interested queues (we consider only the ones with the same
                 * priority class in the cfq group)
                 */
                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
                                                cfq_class_rt(cfqq));
                unsigned sync_slice = cfqd->cfq_slice[1];
                unsigned expect_latency = sync_slice * iq;
                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

                if (expect_latency > group_slice) {
                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
                        /* scale low_slice according to IO priority
                         * and sync vs async */
                        unsigned low_slice =
                                min(slice, base_low_slice * slice / sync_slice);
                        /* the adapted slice value is scaled to fit all iqs
                         * into the target latency */
                        slice = max(slice * group_slice / expect_latency,
                                    low_slice);
                }
        }
        return slice;
}

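/*
 * Worked example: four interested sync queues and a 100-jiffy sync slice
 * give expect_latency == 400.  If the group is only entitled to a
 * 300-jiffy window, each queue's slice is scaled by 300/400 (floored at
 * low_slice), so all four still fit within the target latency.
 */
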
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
        cfqq->allocated_slice = slice;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return false;
        if (time_before(jiffies, cfqq->slice_end))
                return false;

        return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
        sector_t s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
        unsigned wrap = 0; /* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) != rq_is_sync(rq2))
                return rq_is_sync(rq1) ? rq1 : rq2;

        if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
                return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}

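/*
 * Worked example with the default cfq_back_penalty of 2: head at sector
 * 1000, rq1 at 1100 and rq2 at 960.  d1 == 100, while rq2 is behind the
 * head so d2 == (1000 - 960) * 2 == 80; rq2 wins despite the backward
 * seek.  At 940 instead, d2 becomes 120 and the forward request wins.
 */
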
/*
 * Below are the helpers for the cached-leftmost rbtree described above.
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
        /* Service tree is empty */
        if (!root->count)
                return NULL;

        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry(root->left, struct cfq_queue, rb_node);

        return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry_cfqg(root->left);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;
        rb_erase_init(n, &root->rb);
        --root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct rb_node **node = &st->rb.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_group *__cfqg;
        s64 key = cfqg_key(st, cfqg);
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __cfqg = rb_entry_cfqg(parent);

                if (key < cfqg_key(st, __cfqg))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                st->left = &cfqg->rb_node;

        rb_link_node(&cfqg->rb_node, parent, node);
        rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }

        if (cfqg->new_leaf_weight) {
                cfqg->leaf_weight = cfqg->new_leaf_weight;
                cfqg->new_leaf_weight = 0;
        }
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
        struct cfq_group *pos = cfqg;
        struct cfq_group *parent;
        bool propagate;

        /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        cfq_update_group_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);

        /*
         * Activate @cfqg and calculate the portion of vfraction @cfqg is
         * entitled to.  vfraction is calculated by walking the tree
         * towards the root calculating the fraction it has at each level.
         * The compounded ratio is how much vfraction @cfqg owns.
         *
         * Start with the proportion tasks in this cfqg has against active
         * children cfqgs - its leaf_weight against children_weight.
         */
        propagate = !pos->nr_active++;
        pos->children_weight += pos->leaf_weight;
        vfr = vfr * pos->leaf_weight / pos->children_weight;

        /*
         * Compound ->weight walking up the tree.  Both activation and
         * vfraction calculation are done in the same loop.  Propagation
         * stops once an already activated node is met.  vfraction
         * calculation should always continue to the root.
         */
        while ((parent = cfqg_parent(pos))) {
                if (propagate) {
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
                vfr = vfr * pos->weight / parent->children_weight;
                pos = parent;
        }

        cfqg->vfraction = max_t(unsigned, vfr, 1);
}

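/*
 * Worked example of the compounding above: a cfqg whose tasks hold the
 * whole of its own level (leaf_weight 500, children_weight 500) starts
 * with vfr == 1 in fixed point.  If its weight is 300 out of its
 * parent's children_weight of 600, vfr becomes 1 * 300 / 600 == 1/2,
 * i.e. the group is entitled to half of the disk time at the root.
 */
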
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        struct cfq_group *__cfqg;
        struct rb_node *n;

        cfqg->nr_cfqq++;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                return;

        /*
         * Currently put the group at the end. Later implement something
         * so that groups get lesser vtime based on their weights, so that
         * a group does not lose all of its share if it was not
         * continuously backlogged.
         */
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct cfq_group *pos = cfqg;
        bool propagate;

        /*
         * Undo activation from cfq_group_service_tree_add().  Deactivate
         * @cfqg and propagate deactivation upwards.
         */
        propagate = !--pos->nr_active;
        pos->children_weight -= pos->leaf_weight;

        while (propagate) {
                struct cfq_group *parent = cfqg_parent(pos);

                /* @pos has 0 nr_active at this point */
                WARN_ON_ONCE(pos->children_weight);
                pos->vfraction = 0;

                if (!parent)
                        break;

                propagate = !--parent->nr_active;
                parent->children_weight -= pos->weight;
                pos = parent;
        }

        /* remove from the service tree */
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        BUG_ON(cfqg->nr_cfqq < 1);
        cfqg->nr_cfqq--;

        /* If there are other cfq queues under this group, don't delete it */
        if (cfqg->nr_cfqq)
                return;

        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
        cfqg->saved_wl_slice = 0;
        cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
                                                unsigned int *unaccounted_time)
{
        unsigned int slice_used;

        /*
         * Queue got expired before even a single request completed or
         * got expired immediately after first request completion.
         */
        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
                /*
                 * Also charge the seek time incurred to the group, otherwise
                 * if there are multiple queues in the group, each can dispatch
1425                 * a single request on seeky media and cause lots of seek time
1426                 * and group will never know it.
1427                 */
1428                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429                                        1);
1430        } else {
1431                slice_used = jiffies - cfqq->slice_start;
1432                if (slice_used > cfqq->allocated_slice) {
1433                        *unaccounted_time = slice_used - cfqq->allocated_slice;
1434                        slice_used = cfqq->allocated_slice;
1435                }
1436                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437                        *unaccounted_time += cfqq->slice_start -
1438                                        cfqq->dispatch_start;
1439        }
1440
1441        return slice_used;
1442}
1443
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1445                                struct cfq_queue *cfqq)
1446{
1447        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1448        unsigned int used_sl, charge, unaccounted_sl = 0;
1449        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450                        - cfqg->service_tree_idle.count;
1451        unsigned int vfr;
1452
1453        BUG_ON(nr_sync < 0);
1454        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1455
1456        if (iops_mode(cfqd))
1457                charge = cfqq->slice_dispatch;
1458        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459                charge = cfqq->allocated_slice;
1460
1461        /*
1462         * Can't update vdisktime while on service tree and cfqg->vfraction
1463         * is valid only while on it.  Cache vfr, leave the service tree,
1464         * update vdisktime and go back on.  The re-addition to the tree
1465         * will also update the weights as necessary.
1466         */
1467        vfr = cfqg->vfraction;
1468        cfq_group_service_tree_del(st, cfqg);
1469        cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1470        cfq_group_service_tree_add(st, cfqg);
1471
1472        /* This group is being expired. Save the context */
1473        if (time_after(cfqd->workload_expires, jiffies)) {
1474                cfqg->saved_wl_slice = cfqd->workload_expires
1475                                                - jiffies;
1476                cfqg->saved_wl_type = cfqd->serving_wl_type;
1477                cfqg->saved_wl_class = cfqd->serving_wl_class;
1478        } else
1479                cfqg->saved_wl_slice = 0;
1480
1481        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482                                        st->min_vdisktime);
1483        cfq_log_cfqq(cfqq->cfqd, cfqq,
1484                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485                     used_sl, cfqq->slice_dispatch, charge,
1486                     iops_mode(cfqd), cfqq->nr_sectors);
1487        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488        cfqg_stats_set_start_empty_time(cfqg);
1489}
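/*
 * Illustrative note on the vdisktime bump above, assuming that
 * cfqg_scale_charge() scales the charge inversely to vfr (the group's
 * fraction of the total active weight): a group owning half the weight
 * accrues vdisktime at roughly twice the rate of a group owning all of
 * it, so heavier groups stay further left on the service tree and get
 * picked correspondingly more often.
 */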
1490
1491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500        struct cfq_rb_root *st;
1501        int i, j;
1502
1503        for_each_cfqg_st(cfqg, i, j, st)
1504                *st = CFQ_RB_ROOT;
1505        RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507        cfqg->ttime.last_end_request = jiffies;
1508}
1509
1510#ifdef CONFIG_CFQ_GROUP_IOSCHED
1511static void cfq_pd_init(struct blkcg_gq *blkg)
1512{
1513        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1514
1515        cfq_init_cfqg_base(cfqg);
1516        cfqg->weight = blkg->blkcg->cfq_weight;
1517        cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
1518}
1519
1520static void cfq_pd_offline(struct blkcg_gq *blkg)
1521{
1522        /*
1523         * @blkg is going offline and will be ignored by
1524         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1525         * that they don't get lost.  If IOs complete after this point, the
1526         * stats for them will be lost.  Oh well...
1527         */
1528        cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1529}
1530
1531/* offset delta from cfqg->stats to cfqg->dead_stats */
1532static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1533                                        offsetof(struct cfq_group, stats);
1534
1535/* to be used by recursive prfill, sums live and dead stats recursively */
1536static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1537{
1538        u64 sum = 0;
1539
1540        sum += blkg_stat_recursive_sum(pd, off);
1541        sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1542        return sum;
1543}
1544
1545/* to be used by recursive prfill, sums live and dead rwstats recursively */
1546static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1547                                                       int off)
1548{
1549        struct blkg_rwstat a, b;
1550
1551        a = blkg_rwstat_recursive_sum(pd, off);
1552        b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1553        blkg_rwstat_merge(&a, &b);
1554        return a;
1555}
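/*
 * The off + dead_stats_off_delta trick above works because stats and
 * dead_stats are two instances of the same structure within cfq_group,
 * so a field offset valid in one is valid in the other after adding the
 * fixed member-to-member delta.  A minimal sketch of the idiom, with
 * hypothetical names:
 *
 *	struct counters { long reads; long writes; };
 *	struct owner { struct counters live; struct counters dead; };
 *	const int delta = offsetof(struct owner, dead) -
 *			  offsetof(struct owner, live);
 *
 * The value at (base + off) is then a live field, and the value at
 * (base + off + delta) is its dead twin.
 */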
1556
1557static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1558{
1559        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1560
1561        cfqg_stats_reset(&cfqg->stats);
1562        cfqg_stats_reset(&cfqg->dead_stats);
1563}
1564
1565/*
1566 * Search for the cfq group the current task belongs to. The request_queue
1567 * lock must be held.
1568 */
1569static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1570                                                struct blkcg *blkcg)
1571{
1572        struct request_queue *q = cfqd->queue;
1573        struct cfq_group *cfqg = NULL;
1574
1575        /* avoid lookup for the common case where there's no blkcg */
1576        if (blkcg == &blkcg_root) {
1577                cfqg = cfqd->root_group;
1578        } else {
1579                struct blkcg_gq *blkg;
1580
1581                blkg = blkg_lookup_create(blkcg, q);
1582                if (!IS_ERR(blkg))
1583                        cfqg = blkg_to_cfqg(blkg);
1584        }
1585
1586        return cfqg;
1587}
1588
1589static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1590{
1591        /* Currently, all async queues are mapped to root group */
1592        if (!cfq_cfqq_sync(cfqq))
1593                cfqg = cfqq->cfqd->root_group;
1594
1595        cfqq->cfqg = cfqg;
1596        /* cfqq reference on cfqg */
1597        cfqg_get(cfqg);
1598}
1599
1600static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1601                                     struct blkg_policy_data *pd, int off)
1602{
1603        struct cfq_group *cfqg = pd_to_cfqg(pd);
1604
1605        if (!cfqg->dev_weight)
1606                return 0;
1607        return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1608}
1609
1610static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1611                                    struct seq_file *sf)
1612{
1613        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1614                          cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1615                          false);
1616        return 0;
1617}
1618
1619static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1620                                          struct blkg_policy_data *pd, int off)
1621{
1622        struct cfq_group *cfqg = pd_to_cfqg(pd);
1623
1624        if (!cfqg->dev_leaf_weight)
1625                return 0;
1626        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1627}
1628
1629static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
1630                                         struct cftype *cft,
1631                                         struct seq_file *sf)
1632{
1633        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1634                          cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
1635                          false);
1636        return 0;
1637}
1638
1639static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1640                            struct seq_file *sf)
1641{
1642        seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1643        return 0;
1644}
1645
1646static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
1647                                 struct seq_file *sf)
1648{
1649        seq_printf(sf, "%u\n",
1650                   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
1651        return 0;
1652}
1653
1654static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1655                                    const char *buf, bool is_leaf_weight)
1656{
1657        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1658        struct blkg_conf_ctx ctx;
1659        struct cfq_group *cfqg;
1660        int ret;
1661
1662        ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1663        if (ret)
1664                return ret;
1665
1666        ret = -EINVAL;
1667        cfqg = blkg_to_cfqg(ctx.blkg);
1668        if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1669                if (!is_leaf_weight) {
1670                        cfqg->dev_weight = ctx.v;
1671                        cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1672                } else {
1673                        cfqg->dev_leaf_weight = ctx.v;
1674                        cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1675                }
1676                ret = 0;
1677        }
1678
1679        blkg_conf_finish(&ctx);
1680        return ret;
1681}
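/*
 * Usage sketch: blkg_conf_prep() parses input of the form "MAJ:MIN VAL",
 * so from userspace a per-device weight would be set (and cleared again
 * with 0) roughly as follows, assuming the blkio controller is mounted
 * at /sys/fs/cgroup/blkio:
 *
 *	echo "8:16 300" > /sys/fs/cgroup/blkio/grp1/blkio.weight_device
 *	echo "8:16 0"   > /sys/fs/cgroup/blkio/grp1/blkio.weight_device
 *
 * A value of 0 removes the per-device override; the ctx.v ?: fallback
 * above then makes the group-wide cfq_weight take effect again.
 */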
1682
1683static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1684                                  const char *buf)
1685{
1686        return __cfqg_set_weight_device(cgrp, cft, buf, false);
1687}
1688
1689static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
1690                                       const char *buf)
1691{
1692        return __cfqg_set_weight_device(cgrp, cft, buf, true);
1693}
1694
1695static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
1696                            bool is_leaf_weight)
1697{
1698        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1699        struct blkcg_gq *blkg;
1700
1701        if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1702                return -EINVAL;
1703
1704        spin_lock_irq(&blkcg->lock);
1705
1706        if (!is_leaf_weight)
1707                blkcg->cfq_weight = val;
1708        else
1709                blkcg->cfq_leaf_weight = val;
1710
1711        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1712                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1713
1714                if (!cfqg)
1715                        continue;
1716
1717                if (!is_leaf_weight) {
1718                        if (!cfqg->dev_weight)
1719                                cfqg->new_weight = blkcg->cfq_weight;
1720                } else {
1721                        if (!cfqg->dev_leaf_weight)
1722                                cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1723                }
1724        }
1725
1726        spin_unlock_irq(&blkcg->lock);
1727        return 0;
1728}
1729
1730static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1731{
1732        return __cfq_set_weight(cgrp, cft, val, false);
1733}
1734
1735static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1736{
1737        return __cfq_set_weight(cgrp, cft, val, true);
1738}
1739
1740static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1741                           struct seq_file *sf)
1742{
1743        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1744
1745        blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1746                          cft->private, false);
1747        return 0;
1748}
1749
1750static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1751                             struct seq_file *sf)
1752{
1753        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1754
1755        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1756                          cft->private, true);
1757        return 0;
1758}
1759
1760static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1761                                      struct blkg_policy_data *pd, int off)
1762{
1763        u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1764
1765        return __blkg_prfill_u64(sf, pd, sum);
1766}
1767
1768static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1769                                        struct blkg_policy_data *pd, int off)
1770{
1771        struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1772
1773        return __blkg_prfill_rwstat(sf, pd, &sum);
1774}
1775
1776static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
1777                                     struct seq_file *sf)
1778{
1779        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1780
1781        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
1782                          &blkcg_policy_cfq, cft->private, false);
1783        return 0;
1784}
1785
1786static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
1787                                       struct seq_file *sf)
1788{
1789        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1790
1791        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
1792                          &blkcg_policy_cfq, cft->private, true);
1793        return 0;
1794}
1795
1796#ifdef CONFIG_DEBUG_BLK_CGROUP
1797static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1798                                      struct blkg_policy_data *pd, int off)
1799{
1800        struct cfq_group *cfqg = pd_to_cfqg(pd);
1801        u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1802        u64 v = 0;
1803
1804        if (samples) {
1805                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1806                do_div(v, samples);
1807        }
1808        __blkg_prfill_u64(sf, pd, v);
1809        return 0;
1810}
1811
1812/* print avg_queue_size */
1813static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1814                                     struct seq_file *sf)
1815{
1816        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1817
1818        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1819                          &blkcg_policy_cfq, 0, false);
1820        return 0;
1821}
1822#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1823
1824static struct cftype cfq_blkcg_files[] = {
1825        /* on root, weight is mapped to leaf_weight */
1826        {
1827                .name = "weight_device",
1828                .flags = CFTYPE_ONLY_ON_ROOT,
1829                .read_seq_string = cfqg_print_leaf_weight_device,
1830                .write_string = cfqg_set_leaf_weight_device,
1831                .max_write_len = 256,
1832        },
1833        {
1834                .name = "weight",
1835                .flags = CFTYPE_ONLY_ON_ROOT,
1836                .read_seq_string = cfq_print_leaf_weight,
1837                .write_u64 = cfq_set_leaf_weight,
1838        },
1839
1840        /* no such mapping necessary for !roots */
1841        {
1842                .name = "weight_device",
1843                .flags = CFTYPE_NOT_ON_ROOT,
1844                .read_seq_string = cfqg_print_weight_device,
1845                .write_string = cfqg_set_weight_device,
1846                .max_write_len = 256,
1847        },
1848        {
1849                .name = "weight",
1850                .flags = CFTYPE_NOT_ON_ROOT,
1851                .read_seq_string = cfq_print_weight,
1852                .write_u64 = cfq_set_weight,
1853        },
1854
1855        {
1856                .name = "leaf_weight_device",
1857                .read_seq_string = cfqg_print_leaf_weight_device,
1858                .write_string = cfqg_set_leaf_weight_device,
1859                .max_write_len = 256,
1860        },
1861        {
1862                .name = "leaf_weight",
1863                .read_seq_string = cfq_print_leaf_weight,
1864                .write_u64 = cfq_set_leaf_weight,
1865        },
1866
1867        /* statistics, covers only the tasks in the cfqg */
1868        {
1869                .name = "time",
1870                .private = offsetof(struct cfq_group, stats.time),
1871                .read_seq_string = cfqg_print_stat,
1872        },
1873        {
1874                .name = "sectors",
1875                .private = offsetof(struct cfq_group, stats.sectors),
1876                .read_seq_string = cfqg_print_stat,
1877        },
1878        {
1879                .name = "io_service_bytes",
1880                .private = offsetof(struct cfq_group, stats.service_bytes),
1881                .read_seq_string = cfqg_print_rwstat,
1882        },
1883        {
1884                .name = "io_serviced",
1885                .private = offsetof(struct cfq_group, stats.serviced),
1886                .read_seq_string = cfqg_print_rwstat,
1887        },
1888        {
1889                .name = "io_service_time",
1890                .private = offsetof(struct cfq_group, stats.service_time),
1891                .read_seq_string = cfqg_print_rwstat,
1892        },
1893        {
1894                .name = "io_wait_time",
1895                .private = offsetof(struct cfq_group, stats.wait_time),
1896                .read_seq_string = cfqg_print_rwstat,
1897        },
1898        {
1899                .name = "io_merged",
1900                .private = offsetof(struct cfq_group, stats.merged),
1901                .read_seq_string = cfqg_print_rwstat,
1902        },
1903        {
1904                .name = "io_queued",
1905                .private = offsetof(struct cfq_group, stats.queued),
1906                .read_seq_string = cfqg_print_rwstat,
1907        },
1908
1909        /* the same statistics, but covering the cfqg and its descendants */
1910        {
1911                .name = "time_recursive",
1912                .private = offsetof(struct cfq_group, stats.time),
1913                .read_seq_string = cfqg_print_stat_recursive,
1914        },
1915        {
1916                .name = "sectors_recursive",
1917                .private = offsetof(struct cfq_group, stats.sectors),
1918                .read_seq_string = cfqg_print_stat_recursive,
1919        },
1920        {
1921                .name = "io_service_bytes_recursive",
1922                .private = offsetof(struct cfq_group, stats.service_bytes),
1923                .read_seq_string = cfqg_print_rwstat_recursive,
1924        },
1925        {
1926                .name = "io_serviced_recursive",
1927                .private = offsetof(struct cfq_group, stats.serviced),
1928                .read_seq_string = cfqg_print_rwstat_recursive,
1929        },
1930        {
1931                .name = "io_service_time_recursive",
1932                .private = offsetof(struct cfq_group, stats.service_time),
1933                .read_seq_string = cfqg_print_rwstat_recursive,
1934        },
1935        {
1936                .name = "io_wait_time_recursive",
1937                .private = offsetof(struct cfq_group, stats.wait_time),
1938                .read_seq_string = cfqg_print_rwstat_recursive,
1939        },
1940        {
1941                .name = "io_merged_recursive",
1942                .private = offsetof(struct cfq_group, stats.merged),
1943                .read_seq_string = cfqg_print_rwstat_recursive,
1944        },
1945        {
1946                .name = "io_queued_recursive",
1947                .private = offsetof(struct cfq_group, stats.queued),
1948                .read_seq_string = cfqg_print_rwstat_recursive,
1949        },
1950#ifdef CONFIG_DEBUG_BLK_CGROUP
1951        {
1952                .name = "avg_queue_size",
1953                .read_seq_string = cfqg_print_avg_queue_size,
1954        },
1955        {
1956                .name = "group_wait_time",
1957                .private = offsetof(struct cfq_group, stats.group_wait_time),
1958                .read_seq_string = cfqg_print_stat,
1959        },
1960        {
1961                .name = "idle_time",
1962                .private = offsetof(struct cfq_group, stats.idle_time),
1963                .read_seq_string = cfqg_print_stat,
1964        },
1965        {
1966                .name = "empty_time",
1967                .private = offsetof(struct cfq_group, stats.empty_time),
1968                .read_seq_string = cfqg_print_stat,
1969        },
1970        {
1971                .name = "dequeue",
1972                .private = offsetof(struct cfq_group, stats.dequeue),
1973                .read_seq_string = cfqg_print_stat,
1974        },
1975        {
1976                .name = "unaccounted_time",
1977                .private = offsetof(struct cfq_group, stats.unaccounted_time),
1978                .read_seq_string = cfqg_print_stat,
1979        },
1980#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1981        { }     /* terminate */
1982};
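/*
 * Note on the table above: each plain stat file covers only the tasks in
 * that cgroup, while its *_recursive twin also folds in all descendants
 * (plus, via the dead_stats machinery, groups that have already gone
 * offline).  Reading io_serviced_recursive on a parent, for instance,
 * reports its own IOs plus those of every child group.
 */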
1983#else /* GROUP_IOSCHED */
1984static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1985                                                struct blkcg *blkcg)
1986{
1987        return cfqd->root_group;
1988}
1989
1990static inline void
1991cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1992        cfqq->cfqg = cfqg;
1993}
1994
1995#endif /* GROUP_IOSCHED */
1996
1997/*
1998 * The cfqd->service_trees holds all pending cfq_queue's that have
1999 * requests waiting to be processed. It is sorted in the order that
2000 * we will service the queues.
2001 */
2002static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2003                                 bool add_front)
2004{
2005        struct rb_node **p, *parent;
2006        struct cfq_queue *__cfqq;
2007        unsigned long rb_key;
2008        struct cfq_rb_root *st;
2009        int left;
2010        int new_cfqq = 1;
2011
2012        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2013        if (cfq_class_idle(cfqq)) {
2014                rb_key = CFQ_IDLE_DELAY;
2015                parent = rb_last(&st->rb);
2016                if (parent && parent != &cfqq->rb_node) {
2017                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2018                        rb_key += __cfqq->rb_key;
2019                } else
2020                        rb_key += jiffies;
2021        } else if (!add_front) {
2022                /*
2023                 * Get our rb key offset. Subtract any residual slice
2024                 * value carried from last service. A negative resid
2025                 * count indicates slice overrun, and this should position
2026                 * the next service time further away in the tree.
2027                 */
2028                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2029                rb_key -= cfqq->slice_resid;
2030                cfqq->slice_resid = 0;
2031        } else {
2032                rb_key = -HZ;
2033                __cfqq = cfq_rb_first(st);
2034                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2035        }
2036
2037        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2038                new_cfqq = 0;
2039                /*
2040                 * same position, nothing more to do
2041                 */
2042                if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2043                        return;
2044
2045                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2046                cfqq->service_tree = NULL;
2047        }
2048
2049        left = 1;
2050        parent = NULL;
2051        cfqq->service_tree = st;
2052        p = &st->rb.rb_node;
2053        while (*p) {
2054                parent = *p;
2055                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2056
2057                /*
2058                 * sort by key, that represents service time.
2059                 */
2060                if (time_before(rb_key, __cfqq->rb_key))
2061                        p = &parent->rb_left;
2062                else {
2063                        p = &parent->rb_right;
2064                        left = 0;
2065                }
2066        }
2067
2068        if (left)
2069                st->left = &cfqq->rb_node;
2070
2071        cfqq->rb_key = rb_key;
2072        rb_link_node(&cfqq->rb_node, parent, p);
2073        rb_insert_color(&cfqq->rb_node, &st->rb);
2074        st->count++;
2075        if (add_front || !new_cfqq)
2076                return;
2077        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2078}
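/*
 * Worked example of the rb_key arithmetic above, with assumed numbers:
 * for a non-idle queue not added to the front,
 * rb_key = cfq_slice_offset() + jiffies - slice_resid.  If jiffies=5000,
 * the slice offset is 40 and the queue overran its previous slice by 12
 * (slice_resid = -12), then rb_key = 5000 + 40 + 12 = 5052, i.e. the
 * overrun pushes the queue further right (later) in the service tree.
 */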
2079
2080static struct cfq_queue *
2081cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2082                     sector_t sector, struct rb_node **ret_parent,
2083                     struct rb_node ***rb_link)
2084{
2085        struct rb_node **p, *parent;
2086        struct cfq_queue *cfqq = NULL;
2087
2088        parent = NULL;
2089        p = &root->rb_node;
2090        while (*p) {
2091                struct rb_node **n;
2092
2093                parent = *p;
2094                cfqq = rb_entry(parent, struct cfq_queue, p_node);
2095
2096                /*
2097                 * Sort strictly based on sector.  Smallest to the left,
2098                 * largest to the right.
2099                 */
2100                if (sector > blk_rq_pos(cfqq->next_rq))
2101                        n = &(*p)->rb_right;
2102                else if (sector < blk_rq_pos(cfqq->next_rq))
2103                        n = &(*p)->rb_left;
2104                else
2105                        break;
2106                p = n;
2107                cfqq = NULL;
2108        }
2109
2110        *ret_parent = parent;
2111        if (rb_link)
2112                *rb_link = p;
2113        return cfqq;
2114}
2115
2116static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2117{
2118        struct rb_node **p, *parent;
2119        struct cfq_queue *__cfqq;
2120
2121        if (cfqq->p_root) {
2122                rb_erase(&cfqq->p_node, cfqq->p_root);
2123                cfqq->p_root = NULL;
2124        }
2125
2126        if (cfq_class_idle(cfqq))
2127                return;
2128        if (!cfqq->next_rq)
2129                return;
2130
2131        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2132        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2133                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
2134        if (!__cfqq) {
2135                rb_link_node(&cfqq->p_node, parent, p);
2136                rb_insert_color(&cfqq->p_node, cfqq->p_root);
2137        } else
2138                cfqq->p_root = NULL;
2139}
2140
2141/*
2142 * Update cfqq's position in the service tree.
2143 */
2144static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2145{
2146        /*
2147         * Resorting requires the cfqq to be on the RR list already.
2148         */
2149        if (cfq_cfqq_on_rr(cfqq)) {
2150                cfq_service_tree_add(cfqd, cfqq, 0);
2151                cfq_prio_tree_add(cfqd, cfqq);
2152        }
2153}
2154
2155/*
2156 * add to busy list of queues for service, trying to be fair in ordering
2157 * the pending list according to last request service
2158 */
2159static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2160{
2161        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2162        BUG_ON(cfq_cfqq_on_rr(cfqq));
2163        cfq_mark_cfqq_on_rr(cfqq);
2164        cfqd->busy_queues++;
2165        if (cfq_cfqq_sync(cfqq))
2166                cfqd->busy_sync_queues++;
2167
2168        cfq_resort_rr_list(cfqd, cfqq);
2169}
2170
2171/*
2172 * Called when the cfqq no longer has requests pending, remove it from
2173 * the service tree.
2174 */
2175static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2176{
2177        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2178        BUG_ON(!cfq_cfqq_on_rr(cfqq));
2179        cfq_clear_cfqq_on_rr(cfqq);
2180
2181        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2182                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2183                cfqq->service_tree = NULL;
2184        }
2185        if (cfqq->p_root) {
2186                rb_erase(&cfqq->p_node, cfqq->p_root);
2187                cfqq->p_root = NULL;
2188        }
2189
2190        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2191        BUG_ON(!cfqd->busy_queues);
2192        cfqd->busy_queues--;
2193        if (cfq_cfqq_sync(cfqq))
2194                cfqd->busy_sync_queues--;
2195}
2196
2197/*
2198 * rb tree support functions
2199 */
2200static void cfq_del_rq_rb(struct request *rq)
2201{
2202        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2203        const int sync = rq_is_sync(rq);
2204
2205        BUG_ON(!cfqq->queued[sync]);
2206        cfqq->queued[sync]--;
2207
2208        elv_rb_del(&cfqq->sort_list, rq);
2209
2210        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2211                /*
2212                 * Queue will be deleted from service tree when we actually
2213                 * expire it later. Right now just remove it from prio tree
2214                 * as it is empty.
2215                 */
2216                if (cfqq->p_root) {
2217                        rb_erase(&cfqq->p_node, cfqq->p_root);
2218                        cfqq->p_root = NULL;
2219                }
2220        }
2221}
2222
2223static void cfq_add_rq_rb(struct request *rq)
2224{
2225        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2226        struct cfq_data *cfqd = cfqq->cfqd;
2227        struct request *prev;
2228
2229        cfqq->queued[rq_is_sync(rq)]++;
2230
2231        elv_rb_add(&cfqq->sort_list, rq);
2232
2233        if (!cfq_cfqq_on_rr(cfqq))
2234                cfq_add_cfqq_rr(cfqd, cfqq);
2235
2236        /*
2237         * check if this request is a better next-serve candidate
2238         */
2239        prev = cfqq->next_rq;
2240        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2241
2242        /*
2243         * adjust priority tree position, if ->next_rq changes
2244         */
2245        if (prev != cfqq->next_rq)
2246                cfq_prio_tree_add(cfqd, cfqq);
2247
2248        BUG_ON(!cfqq->next_rq);
2249}
2250
2251static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2252{
2253        elv_rb_del(&cfqq->sort_list, rq);
2254        cfqq->queued[rq_is_sync(rq)]--;
2255        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2256        cfq_add_rq_rb(rq);
2257        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2258                                 rq->cmd_flags);
2259}
2260
2261static struct request *
2262cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2263{
2264        struct task_struct *tsk = current;
2265        struct cfq_io_cq *cic;
2266        struct cfq_queue *cfqq;
2267
2268        cic = cfq_cic_lookup(cfqd, tsk->io_context);
2269        if (!cic)
2270                return NULL;
2271
2272        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2273        if (cfqq) {
2274                sector_t sector = bio->bi_sector + bio_sectors(bio);
2275
2276                return elv_rb_find(&cfqq->sort_list, sector);
2277        }
2278
2279        return NULL;
2280}
2281
2282static void cfq_activate_request(struct request_queue *q, struct request *rq)
2283{
2284        struct cfq_data *cfqd = q->elevator->elevator_data;
2285
2286        cfqd->rq_in_driver++;
2287        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2288                                                cfqd->rq_in_driver);
2289
2290        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2291}
2292
2293static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2294{
2295        struct cfq_data *cfqd = q->elevator->elevator_data;
2296
2297        WARN_ON(!cfqd->rq_in_driver);
2298        cfqd->rq_in_driver--;
2299        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2300                                                cfqd->rq_in_driver);
2301}
2302
2303static void cfq_remove_request(struct request *rq)
2304{
2305        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2306
2307        if (cfqq->next_rq == rq)
2308                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2309
2310        list_del_init(&rq->queuelist);
2311        cfq_del_rq_rb(rq);
2312
2313        cfqq->cfqd->rq_queued--;
2314        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2315        if (rq->cmd_flags & REQ_PRIO) {
2316                WARN_ON(!cfqq->prio_pending);
2317                cfqq->prio_pending--;
2318        }
2319}
2320
2321static int cfq_merge(struct request_queue *q, struct request **req,
2322                     struct bio *bio)
2323{
2324        struct cfq_data *cfqd = q->elevator->elevator_data;
2325        struct request *__rq;
2326
2327        __rq = cfq_find_rq_fmerge(cfqd, bio);
2328        if (__rq && elv_rq_merge_ok(__rq, bio)) {
2329                *req = __rq;
2330                return ELEVATOR_FRONT_MERGE;
2331        }
2332
2333        return ELEVATOR_NO_MERGE;
2334}
2335
2336static void cfq_merged_request(struct request_queue *q, struct request *req,
2337                               int type)
2338{
2339        if (type == ELEVATOR_FRONT_MERGE) {
2340                struct cfq_queue *cfqq = RQ_CFQQ(req);
2341
2342                cfq_reposition_rq_rb(cfqq, req);
2343        }
2344}
2345
2346static void cfq_bio_merged(struct request_queue *q, struct request *req,
2347                                struct bio *bio)
2348{
2349        cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2350}
2351
2352static void
2353cfq_merged_requests(struct request_queue *q, struct request *rq,
2354                    struct request *next)
2355{
2356        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2357        struct cfq_data *cfqd = q->elevator->elevator_data;
2358
2359        /*
2360         * reposition in fifo if next is older than rq
2361         */
2362        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2363            time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
2364            cfqq == RQ_CFQQ(next)) {
2365                list_move(&rq->queuelist, &next->queuelist);
2366                rq_set_fifo_time(rq, rq_fifo_time(next));
2367        }
2368
2369        if (cfqq->next_rq == next)
2370                cfqq->next_rq = rq;
2371        cfq_remove_request(next);
2372        cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2373
2374        cfqq = RQ_CFQQ(next);
2375        /*
2376         * All requests of this queue were merged into other queues, so delete
2377         * it from the service tree. If it's the active_queue,
2378         * cfq_dispatch_requests() will choose to expire it or to idle.
2379         */
2380        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2381            cfqq != cfqd->active_queue)
2382                cfq_del_cfqq_rr(cfqd, cfqq);
2383}
2384
2385static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2386                           struct bio *bio)
2387{
2388        struct cfq_data *cfqd = q->elevator->elevator_data;
2389        struct cfq_io_cq *cic;
2390        struct cfq_queue *cfqq;
2391
2392        /*
2393         * Disallow merge of a sync bio into an async request.
2394         */
2395        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2396                return false;
2397
2398        /*
2399         * Lookup the cfqq that this bio will be queued with and allow
2400         * merge only if rq is queued there.
2401         */
2402        cic = cfq_cic_lookup(cfqd, current->io_context);
2403        if (!cic)
2404                return false;
2405
2406        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2407        return cfqq == RQ_CFQQ(rq);
2408}
2409
2410static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2411{
2412        del_timer(&cfqd->idle_slice_timer);
2413        cfqg_stats_update_idle_time(cfqq->cfqg);
2414}
2415
2416static void __cfq_set_active_queue(struct cfq_data *cfqd,
2417                                   struct cfq_queue *cfqq)
2418{
2419        if (cfqq) {
2420                cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2421                                cfqd->serving_wl_class, cfqd->serving_wl_type);
2422                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2423                cfqq->slice_start = 0;
2424                cfqq->dispatch_start = jiffies;
2425                cfqq->allocated_slice = 0;
2426                cfqq->slice_end = 0;
2427                cfqq->slice_dispatch = 0;
2428                cfqq->nr_sectors = 0;
2429
2430                cfq_clear_cfqq_wait_request(cfqq);
2431                cfq_clear_cfqq_must_dispatch(cfqq);
2432                cfq_clear_cfqq_must_alloc_slice(cfqq);
2433                cfq_clear_cfqq_fifo_expire(cfqq);
2434                cfq_mark_cfqq_slice_new(cfqq);
2435
2436                cfq_del_timer(cfqd, cfqq);
2437        }
2438
2439        cfqd->active_queue = cfqq;
2440}
2441
2442/*
2443 * current cfqq expired its slice (or was too idle), select new one
2444 */
2445static void
2446__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2447                    bool timed_out)
2448{
2449        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2450
2451        if (cfq_cfqq_wait_request(cfqq))
2452                cfq_del_timer(cfqd, cfqq);
2453
2454        cfq_clear_cfqq_wait_request(cfqq);
2455        cfq_clear_cfqq_wait_busy(cfqq);
2456
2457        /*
2458         * If this cfqq is shared between multiple processes, check to
2459         * make sure that those processes are still issuing I/Os within
2460         * the mean seek distance.  If not, it may be time to break the
2461         * queues apart again.
2462         */
2463        if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2464                cfq_mark_cfqq_split_coop(cfqq);
2465
2466        /*
2467         * store what was left of this slice, if the queue idled/timed out
2468         */
2469        if (timed_out) {
2470                if (cfq_cfqq_slice_new(cfqq))
2471                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2472                else
2473                        cfqq->slice_resid = cfqq->slice_end - jiffies;
2474                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2475        }
2476
2477        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2478
2479        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2480                cfq_del_cfqq_rr(cfqd, cfqq);
2481
2482        cfq_resort_rr_list(cfqd, cfqq);
2483
2484        if (cfqq == cfqd->active_queue)
2485                cfqd->active_queue = NULL;
2486
2487        if (cfqd->active_cic) {
2488                put_io_context(cfqd->active_cic->icq.ioc);
2489                cfqd->active_cic = NULL;
2490        }
2491}
2492
2493static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2494{
2495        struct cfq_queue *cfqq = cfqd->active_queue;
2496
2497        if (cfqq)
2498                __cfq_slice_expired(cfqd, cfqq, timed_out);
2499}
2500
2501/*
2502 * Get next queue for service. Unless we have a queue preemption,
2503 * we'll simply select the first cfqq in the service tree.
2504 */
2505static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2506{
2507        struct cfq_rb_root *st = st_for(cfqd->serving_group,
2508                        cfqd->serving_wl_class, cfqd->serving_wl_type);
2509
2510        if (!cfqd->rq_queued)
2511                return NULL;
2512
2513        /* There is nothing to dispatch */
2514        if (!st)
2515                return NULL;
2516        if (RB_EMPTY_ROOT(&st->rb))
2517                return NULL;
2518        return cfq_rb_first(st);
2519}
2520
2521static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2522{
2523        struct cfq_group *cfqg;
2524        struct cfq_queue *cfqq;
2525        int i, j;
2526        struct cfq_rb_root *st;
2527
2528        if (!cfqd->rq_queued)
2529                return NULL;
2530
2531        cfqg = cfq_get_next_cfqg(cfqd);
2532        if (!cfqg)
2533                return NULL;
2534
2535        for_each_cfqg_st(cfqg, i, j, st)
2536                if ((cfqq = cfq_rb_first(st)) != NULL)
2537                        return cfqq;
2538        return NULL;
2539}
2540
2541/*
2542 * Get and set a new active queue for service.
2543 */
2544static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2545                                              struct cfq_queue *cfqq)
2546{
2547        if (!cfqq)
2548                cfqq = cfq_get_next_queue(cfqd);
2549
2550        __cfq_set_active_queue(cfqd, cfqq);
2551        return cfqq;
2552}
2553
2554static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2555                                          struct request *rq)
2556{
2557        if (blk_rq_pos(rq) >= cfqd->last_position)
2558                return blk_rq_pos(rq) - cfqd->last_position;
2559        else
2560                return cfqd->last_position - blk_rq_pos(rq);
2561}
2562
2563static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2564                               struct request *rq)
2565{
2566        return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2567}
2568
2569static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2570                                    struct cfq_queue *cur_cfqq)
2571{
2572        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2573        struct rb_node *parent, *node;
2574        struct cfq_queue *__cfqq;
2575        sector_t sector = cfqd->last_position;
2576
2577        if (RB_EMPTY_ROOT(root))
2578                return NULL;
2579
2580        /*
2581         * First, if we find a request starting at the end of the last
2582         * request, choose it.
2583         */
2584        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2585        if (__cfqq)
2586                return __cfqq;
2587
2588        /*
2589         * If the exact sector wasn't found, the parent of the NULL leaf
2590         * will contain the closest sector.
2591         */
2592        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2593        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2594                return __cfqq;
2595
2596        if (blk_rq_pos(__cfqq->next_rq) < sector)
2597                node = rb_next(&__cfqq->p_node);
2598        else
2599                node = rb_prev(&__cfqq->p_node);
2600        if (!node)
2601                return NULL;
2602
2603        __cfqq = rb_entry(node, struct cfq_queue, p_node);
2604        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2605                return __cfqq;
2606
2607        return NULL;
2608}
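/*
 * The search above is a plain nearest-neighbour lookup in a sector-sorted
 * rbtree: first try an exact hit at last_position, then the parent node
 * where the lookup bottomed out, then that node's in-order neighbour on
 * the far side of the target sector.  The two fallback candidates must
 * additionally pass the CFQQ_CLOSE_THR distance check in cfq_rq_close().
 */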
2609
2610/*
2611 * cfqd - obvious
2612 * cur_cfqq - passed in so that we don't decide that the current queue is
2613 *            closely cooperating with itself.
2614 *
2615 * So, basically we're assuming that cur_cfqq has dispatched at least
2616 * one request, and that cfqd->last_position reflects a position on the disk
2617 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2618 * assumption.
2619 */
2620static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2621                                              struct cfq_queue *cur_cfqq)
2622{
2623        struct cfq_queue *cfqq;
2624
2625        if (cfq_class_idle(cur_cfqq))
2626                return NULL;
2627        if (!cfq_cfqq_sync(cur_cfqq))
2628                return NULL;
2629        if (CFQQ_SEEKY(cur_cfqq))
2630                return NULL;
2631
2632        /*
2633         * Don't search priority tree if it's the only queue in the group.
2634         */
2635        if (cur_cfqq->cfqg->nr_cfqq == 1)
2636                return NULL;
2637
2638        /*
2639         * We should notice if some of the queues are cooperating, e.g.
2640         * working closely on the same area of the disk. In that case,
2641         * we can group them together and not waste time idling.
2642         */
2643        cfqq = cfqq_close(cfqd, cur_cfqq);
2644        if (!cfqq)
2645                return NULL;
2646
2647        /* If new queue belongs to different cfq_group, don't choose it */
2648        if (cur_cfqq->cfqg != cfqq->cfqg)
2649                return NULL;
2650
2651        /*
2652         * It only makes sense to merge sync queues.
2653         */
2654        if (!cfq_cfqq_sync(cfqq))
2655                return NULL;
2656        if (CFQQ_SEEKY(cfqq))
2657                return NULL;
2658
2659        /*
2660         * Do not merge queues of different priority classes
2661         */
2662        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2663                return NULL;
2664
2665        return cfqq;
2666}
2667
2668/*
2669 * Determine whether we should enforce idle window for this queue.
2670 */
2671
2672static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2673{
2674        enum wl_class_t wl_class = cfqq_class(cfqq);
2675        struct cfq_rb_root *st = cfqq->service_tree;
2676
2677        BUG_ON(!st);
2678        BUG_ON(!st->count);
2679
2680        if (!cfqd->cfq_slice_idle)
2681                return false;
2682
2683        /* We never do for idle class queues. */
2684        if (wl_class == IDLE_WORKLOAD)
2685                return false;
2686
2687        /* We do for queues that were marked with idle window flag. */
2688        if (cfq_cfqq_idle_window(cfqq) &&
2689           !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2690                return true;
2691
2692        /*
2693         * Otherwise, we idle only if the queue is the last one
2694         * on its service tree.
2695         */
2696        if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2697           !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2698                return true;
2699        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2700        return false;
2701}
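/*
 * Summarising the checks above: idling is skipped when slice_idle is 0
 * or for IDLE-class queues; it is granted to queues carrying the
 * idle_window flag (unless the device is non-rotational and supports
 * queueing), or to the last sync queue on its service tree whose mean
 * think time is not known to be large.
 */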
2702
2703static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2704{
2705        struct cfq_queue *cfqq = cfqd->active_queue;
2706        struct cfq_io_cq *cic;
2707        unsigned long sl, group_idle = 0;
2708
2709        /*
2710         * SSD device without seek penalty, disable idling. But only do so
2711         * for devices that support queuing, otherwise we still have a problem
2712         * with sync vs async workloads.
2713         */
2714        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2715                return;
2716
2717        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2718        WARN_ON(cfq_cfqq_slice_new(cfqq));
2719
2720        /*
2721         * idle is disabled, either manually or by past process history
2722         */
2723        if (!cfq_should_idle(cfqd, cfqq)) {
2724                /* no queue idling. Check for group idling */
2725                if (cfqd->cfq_group_idle)
2726                        group_idle = cfqd->cfq_group_idle;
2727                else
2728                        return;
2729        }
2730
2731        /*
2732         * still active requests from this queue, don't idle
2733         */
2734        if (cfqq->dispatched)
2735                return;
2736
2737        /*
2738         * task has exited, don't wait
2739         */
2740        cic = cfqd->active_cic;
2741        if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2742                return;
2743
2744        /*
2745         * If our average think time is larger than the remaining time
2746         * slice, then don't idle. This avoids overrunning the allotted
2747         * time slice.
2748         */
2749        if (sample_valid(cic->ttime.ttime_samples) &&
2750            (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2751                cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2752                             cic->ttime.ttime_mean);
2753                return;
2754        }
2755
2756        /* There are other queues in the group, don't do group idle */
2757        if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2758                return;
2759
2760        cfq_mark_cfqq_wait_request(cfqq);
2761
2762        if (group_idle)
2763                sl = cfqd->cfq_group_idle;
2764        else
2765                sl = cfqd->cfq_slice_idle;
2766
2767        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2768        cfqg_stats_set_start_idle_time(cfqq->cfqg);
2769        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2770                        group_idle ? 1 : 0);
2771}
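/*
 * Worked example, assuming the default cfq_slice_idle of HZ/125 (8ms at
 * HZ=1000): a sync queue that just went empty arms the timer for
 * jiffies + 8.  If its task issues another request within those 8ms the
 * queue keeps its slice; otherwise the timer fires and the slice is
 * given up.  Group idle behaves the same way but waits on behalf of the
 * whole cfq_group.
 */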
2772
2773/*
2774 * Move request from internal lists to the request queue dispatch list.
2775 */
2776static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2777{
2778        struct cfq_data *cfqd = q->elevator->elevator_data;
2779        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2780
2781        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2782
2783        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2784        cfq_remove_request(rq);
2785        cfqq->dispatched++;
2786        (RQ_CFQG(rq))->dispatched++;
2787        elv_dispatch_sort(q, rq);
2788
2789        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2790        cfqq->nr_sectors += blk_rq_sectors(rq);
2791        cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2792}
2793
2794/*
2795 * return expired entry, or NULL to just start from scratch in rbtree
2796 */
2797static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2798{
2799        struct request *rq = NULL;
2800
2801        if (cfq_cfqq_fifo_expire(cfqq))
2802                return NULL;
2803
2804        cfq_mark_cfqq_fifo_expire(cfqq);
2805
2806        if (list_empty(&cfqq->fifo))
2807                return NULL;
2808
2809        rq = rq_entry_fifo(cfqq->fifo.next);
2810        if (time_before(jiffies, rq_fifo_time(rq)))
2811                rq = NULL;
2812
2813        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2814        return rq;
2815}
2816
2817static inline int
2818cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2819{
2820        const int base_rq = cfqd->cfq_slice_async_rq;
2821
2822        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2823
2824        return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2825}
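/*
 * Worked numbers for the formula above: with the default
 * cfq_slice_async_rq of 2 and IOPRIO_BE_NR of 8, an ioprio-0 (highest)
 * best-effort queue may have up to 2 * 2 * (8 - 0) = 32 requests
 * allocated, while an ioprio-7 queue is capped at 2 * 2 * (8 - 7) = 4.
 */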
2826
2827/*
2828 * Must be called with the queue_lock held.
2829 */
2830static int cfqq_process_refs(struct cfq_queue *cfqq)
2831{
2832        int process_refs, io_refs;
2833
2834        io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2835        process_refs = cfqq->ref - io_refs;
2836        BUG_ON(process_refs < 0);
2837        return process_refs;
2838}
2839
2840static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2841{
2842        int process_refs, new_process_refs;
2843        struct cfq_queue *__cfqq;
2844
2845        /*
2846         * If there are no process references on the new_cfqq, then it is
2847         * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2848         * chain may have dropped their last reference (not just their
2849         * last process reference).
2850         */
2851        if (!cfqq_process_refs(new_cfqq))
2852                return;
2853
2854        /* Avoid a circular list and skip interim queue merges */
2855        while ((__cfqq = new_cfqq->new_cfqq)) {
2856                if (__cfqq == cfqq)
2857                        return;
2858                new_cfqq = __cfqq;
2859        }
2860
2861        process_refs = cfqq_process_refs(cfqq);
2862        new_process_refs = cfqq_process_refs(new_cfqq);
2863        /*
2864         * If the process for the cfqq has gone away, there is no
2865         * sense in merging the queues.
2866         */
2867        if (process_refs == 0 || new_process_refs == 0)
2868                return;
2869
2870        /*
2871         * Merge in the direction of the lesser amount of work.
2872         */
2873        if (new_process_refs >= process_refs) {
2874                cfqq->new_cfqq = new_cfqq;
2875                new_cfqq->ref += process_refs;
2876        } else {
2877                new_cfqq->new_cfqq = cfqq;
2878                cfqq->ref += new_process_refs;
2879        }
2880}
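/*
 * "Direction of the lesser amount of work" above means the queue with
 * fewer process references is drained into the one with more.  E.g. with
 * process_refs=1 on cfqq and new_process_refs=3 on new_cfqq, the code
 * sets cfqq->new_cfqq = new_cfqq and new_cfqq absorbs cfqq's single
 * reference, so only one process has to migrate between queues.
 */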
2881
2882static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2883                        struct cfq_group *cfqg, enum wl_class_t wl_class)
2884{
2885        struct cfq_queue *queue;
2886        int i;
2887        bool key_valid = false;
2888        unsigned long lowest_key = 0;
2889        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2890
2891        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2892                /* select the one with lowest rb_key */
2893                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2894                if (queue &&
2895                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2896                        lowest_key = queue->rb_key;
2897                        cur_best = i;
2898                        key_valid = true;
2899                }
2900        }
2901
2902        return cur_best;
2903}
2904
2905static void
2906choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2907{
2908        unsigned slice;
2909        unsigned count;
2910        struct cfq_rb_root *st;
2911        unsigned group_slice;
2912        enum wl_class_t original_class = cfqd->serving_wl_class;
2913
2914        /* Choose next priority. RT > BE > IDLE */
2915        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2916                cfqd->serving_wl_class = RT_WORKLOAD;
2917        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2918                cfqd->serving_wl_class = BE_WORKLOAD;
2919        else {
2920                cfqd->serving_wl_class = IDLE_WORKLOAD;
2921                cfqd->workload_expires = jiffies + 1;
2922                return;
2923        }
2924
2925        if (original_class != cfqd->serving_wl_class)
2926                goto new_workload;
2927
2928        /*
2929         * For RT and BE, we have to choose also the type
2930         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2931         * expiration time
2932         */
2933        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2934        count = st->count;
2935
2936        /*
2937         * check workload expiration, and that we still have other queues ready
2938         */
2939        if (count && !time_after(jiffies, cfqd->workload_expires))
2940                return;
2941
2942new_workload:
2943        /* otherwise select new workload type */
2944        cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2945                                        cfqd->serving_wl_class);
2946        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2947        count = st->count;
2948
2949        /*
2950         * the workload slice is computed as a fraction of target latency
2951         * proportional to the number of queues in that workload, over
2952         * all the queues in the same priority class
2953         */
2954        group_slice = cfq_group_slice(cfqd, cfqg);
2955
2956        slice = group_slice * count /
2957                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2958                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2959                                        cfqg));
2960
2961        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2962                unsigned int tmp;
2963
2964                /*
2965                 * Async queues are currently system wide. Just taking the
2966                 * proportion of queues within the same group will lead to a
2967                 * higher async ratio system wide, as the root group generally
2968                 * has a higher weight. A more accurate approach would be to
2969                 * calculate the system wide async/sync ratio.
2970                 */
2971                tmp = cfqd->cfq_target_latency *
2972                        cfqg_busy_async_queues(cfqd, cfqg);
2973                tmp = tmp/cfqd->busy_queues;
2974                slice = min_t(unsigned, slice, tmp);
2975
2976                /* async workload slice is scaled down according to
2977                 * the sync/async slice ratio. */
2978                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2979        } else
2980                /* sync workload slice is at least 2 * cfq_slice_idle */
2981                slice = max(slice, 2 * cfqd->cfq_slice_idle);
2982
2983        slice = max_t(unsigned, slice, CFQ_MIN_TT);
2984        cfq_log(cfqd, "workload slice:%d", slice);
2985        cfqd->workload_expires = jiffies + slice;
2986}
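/*
 * Worked example of the slice computation above, with assumed values:
 * say group_slice is 300 jiffies, the chosen sync service tree holds
 * count=2 queues and the class-wide busy-queue figure is 6.  Then
 * slice = 300 * 2 / 6 = 100, floored at 2 * cfq_slice_idle for sync
 * workloads (and at CFQ_MIN_TT in any case), and the workload expires
 * at jiffies + slice.
 */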
2987
2988static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2989{
2990        struct cfq_rb_root *st = &cfqd->grp_service_tree;
2991        struct cfq_group *cfqg;
2992
2993        if (RB_EMPTY_ROOT(&st->rb))
2994                return NULL;
2995        cfqg = cfq_rb_first_group(st);
2996        update_min_vdisktime(st);
2997        return cfqg;
2998}
2999
3000static void cfq_choose_cfqg(struct cfq_data *cfqd)
3001{
3002        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3003
3004        cfqd->serving_group = cfqg;
3005
3006        /* Restore the workload type data */
3007        if (cfqg->saved_wl_slice) {
3008                cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3009                cfqd->serving_wl_type = cfqg->saved_wl_type;
3010                cfqd->serving_wl_class = cfqg->saved_wl_class;
3011        } else
3012                cfqd->workload_expires = jiffies - 1;
3013
3014        choose_wl_class_and_type(cfqd, cfqg);
3015}
3016
3017/*
3018 * Select a queue for service. If we have a current active queue,
3019 * check whether to continue servicing it, or retrieve and set a new one.
3020 */
3021static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3022{
3023        struct cfq_queue *cfqq, *new_cfqq = NULL;
3024
3025        cfqq = cfqd->active_queue;
3026        if (!cfqq)
3027                goto new_queue;
3028
3029        if (!cfqd->rq_queued)
3030                return NULL;
3031
3032        /*
3033         * We were waiting for group to get backlogged. Expire the queue
3034         */
3035        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3036                goto expire;
3037
3038        /*
3039         * The active queue has run out of time, expire it and select new.
3040         */
3041        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3042                /*
3043                 * If slice had not expired at the completion of last request
3044                 * we might not have turned on wait_busy flag. Don't expire
3045                 * the queue yet. Allow the group to get backlogged.
3046                 *
3047                 * The very fact that we have used up the slice means we
3048                 * have been idling all along on this queue, so it should
3049                 * be ok to wait for this request to complete.
3050                 */
3051                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3052                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3053                        cfqq = NULL;
3054                        goto keep_queue;
3055                } else
3056                        goto check_group_idle;
3057        }
3058
3059        /*
3060         * The active queue has requests and isn't expired, allow it to
3061         * dispatch.
3062         */
3063        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3064                goto keep_queue;
3065
3066        /*
3067         * If another queue has a request waiting within our mean seek
3068         * distance, let it run.  The expire code will check for close
3069         * cooperators and put the close queue at the front of the service
3070         * tree.  If possible, merge the expiring queue with the new cfqq.
3071         */
3072        new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3073        if (new_cfqq) {
3074                if (!cfqq->new_cfqq)
3075                        cfq_setup_merge(cfqq, new_cfqq);
3076                goto expire;
3077        }
3078
3079        /*
3080         * No requests pending. If the active queue still has requests in
3081         * flight or is idling for a new request, allow either of these
3082         * conditions to happen (or time out) before selecting a new queue.
3083         */
3084        if (timer_pending(&cfqd->idle_slice_timer)) {
3085                cfqq = NULL;
3086                goto keep_queue;
3087        }
3088
3089        /*
3090         * This is a deep seek queue, but the device is much faster than
3091         * the queue can deliver; don't idle.
3092         */
3093        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3094            (cfq_cfqq_slice_new(cfqq) ||
3095            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3096                cfq_clear_cfqq_deep(cfqq);
3097                cfq_clear_cfqq_idle_window(cfqq);
3098        }
3099
3100        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3101                cfqq = NULL;
3102                goto keep_queue;
3103        }
3104
3105        /*
3106         * If group idle is enabled and there are requests dispatched from
3107         * this group, wait for requests to complete.
3108         */
3109check_group_idle:
3110        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3111            cfqq->cfqg->dispatched &&
3112            !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3113                cfqq = NULL;
3114                goto keep_queue;
3115        }
3116
3117expire:
3118        cfq_slice_expired(cfqd, 0);
3119new_queue:
3120        /*
3121         * Current queue expired. Check if we have to switch to a new
3122         * service tree
3123         */
3124        if (!new_cfqq)
3125                cfq_choose_cfqg(cfqd);
3126
3127        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3128keep_queue:
3129        return cfqq;
3130}
3131
3132static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3133{
3134        int dispatched = 0;
3135
3136        while (cfqq->next_rq) {
3137                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3138                dispatched++;
3139        }
3140
3141        BUG_ON(!list_empty(&cfqq->fifo));
3142
3143        /* By default cfqq is not expired if it is empty. Do it explicitly */
3144        __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3145        return dispatched;
3146}
3147
3148/*
3149 * Drain our current requests. Used for barriers and when switching
3150 * io schedulers on-the-fly.
3151 */
3152static int cfq_forced_dispatch(struct cfq_data *cfqd)
3153{
3154        struct cfq_queue *cfqq;
3155        int dispatched = 0;
3156
3157        /* Expire the timeslice of the current active queue first */
3158        cfq_slice_expired(cfqd, 0);
3159        while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3160                __cfq_set_active_queue(cfqd, cfqq);
3161                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3162        }
3163
3164        BUG_ON(cfqd->busy_queues);
3165
3166        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3167        return dispatched;
3168}
3169
3170static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3171        struct cfq_queue *cfqq)
3172{
3173        /* the queue hasn't finished any request, can't estimate */
3174        if (cfq_cfqq_slice_new(cfqq))
3175                return true;
3176        if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3177                cfqq->slice_end))
3178                return true;
3179
3180        return false;
3181}
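
/*
 * For example, with the default cfq_slice_idle of 8ms (HZ=1000) and 3
 * requests still dispatched, the slice is considered "used soon" once
 * less than 3 * 8 = 24ms of it remains.
 */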
3182
3183static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3184{
3185        unsigned int max_dispatch;
3186
3187        /*
3188         * Drain async requests before we start sync IO
3189         */
3190        if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3191                return false;
3192
3193        /*
3194         * If this is an async queue and we have sync IO in flight, let it wait
3195         */
3196        if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3197                return false;
3198
3199        max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3200        if (cfq_class_idle(cfqq))
3201                max_dispatch = 1;
3202
3203        /*
3204         * Does this cfqq already have too much IO in flight?
3205         */
3206        if (cfqq->dispatched >= max_dispatch) {
3207                bool promote_sync = false;
3208                /*
3209                 * idle queue must always only have a single IO in flight
3210                 */
3211                if (cfq_class_idle(cfqq))
3212                        return false;
3213
3214                /*
3215                 * If there is only one sync queue, we can ignore the async
3216                 * queues here and give the sync queue no dispatch limit,
3217                 * because a sync queue can preempt async queues anyway;
3218                 * limiting the sync queue doesn't make sense. This is
3219                 * useful for the aiostress test.
3220                 */
3221                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3222                        promote_sync = true;
3223
3224                /*
3225                 * We have other queues, don't allow more IO from this one
3226                 */
3227                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3228                                !promote_sync)
3229                        return false;
3230
3231                /*
3232                 * Sole queue user, no limit
3233                 */
3234                if (cfqd->busy_queues == 1 || promote_sync)
3235                        max_dispatch = -1;
3236                else
3237                        /*
3238                         * Normally we start throttling cfqq when cfq_quantum/2
3239                         * requests have been dispatched. But we can drive
3240                         * deeper queue depths at the beginning of slice
3241                         * subject to the upper limit of cfq_quantum.
3242                         */
3243                        max_dispatch = cfqd->cfq_quantum;
3244        }
3245
3246        /*
3247         * Async queues must wait a bit before being allowed dispatch.
3248         * We also ramp up the dispatch depth gradually for async IO,
3249         * based on the last sync IO we serviced
3250         */
3251        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3252                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3253                unsigned int depth;
3254
3255                depth = last_sync / cfqd->cfq_slice[1];
3256                if (!depth && !cfqq->dispatched)
3257                        depth = 1;
3258                if (depth < max_dispatch)
3259                        max_dispatch = depth;
3260        }
3261
3262        /*
3263         * If we're below the current max, allow a dispatch
3264         */
3265        return cfqq->dispatched < max_dispatch;
3266}
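
/*
 * The async ramp above, by way of example (HZ=1000 defaults): if the
 * last delayed sync completion was 250ms ago and cfq_slice[1] (the sync
 * slice) is 100ms, depth = 250/100 = 2, so an async queue is limited to
 * 2 requests in flight even if max_dispatch would otherwise be larger.
 */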
3267
3268/*
3269 * Dispatch a request from cfqq, moving it to the request queue
3270 * dispatch list.
3271 */
3272static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3273{
3274        struct request *rq;
3275
3276        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3277
3278        if (!cfq_may_dispatch(cfqd, cfqq))
3279                return false;
3280
3281        /*
3282         * follow expired path, else get first next available
3283         */
3284        rq = cfq_check_fifo(cfqq);
3285        if (!rq)
3286                rq = cfqq->next_rq;
3287
3288        /*
3289         * insert request into driver dispatch list
3290         */
3291        cfq_dispatch_insert(cfqd->queue, rq);
3292
3293        if (!cfqd->active_cic) {
3294                struct cfq_io_cq *cic = RQ_CIC(rq);
3295
3296                atomic_long_inc(&cic->icq.ioc->refcount);
3297                cfqd->active_cic = cic;
3298        }
3299
3300        return true;
3301}
3302
3303/*
3304 * Find the cfqq that we need to service and move a request from that to the
3305 * dispatch list
3306 */
3307static int cfq_dispatch_requests(struct request_queue *q, int force)
3308{
3309        struct cfq_data *cfqd = q->elevator->elevator_data;
3310        struct cfq_queue *cfqq;
3311
3312        if (!cfqd->busy_queues)
3313                return 0;
3314
3315        if (unlikely(force))
3316                return cfq_forced_dispatch(cfqd);
3317
3318        cfqq = cfq_select_queue(cfqd);
3319        if (!cfqq)
3320                return 0;
3321
3322        /*
3323         * Dispatch a request from this cfqq, if it is allowed
3324         */
3325        if (!cfq_dispatch_request(cfqd, cfqq))
3326                return 0;
3327
3328        cfqq->slice_dispatch++;
3329        cfq_clear_cfqq_must_dispatch(cfqq);
3330
3331        /*
3332         * expire an async queue immediately if it has used up its slice. idle
3333         * queues always expire after 1 dispatch round.
3334         */
3335        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3336            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3337            cfq_class_idle(cfqq))) {
3338                cfqq->slice_end = jiffies + 1;
3339                cfq_slice_expired(cfqd, 0);
3340        }
3341
3342        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3343        return 1;
3344}
3345
3346/*
3347 * task holds one reference to the queue, dropped when task exits. each rq
3348 * in-flight on this queue also holds a reference, dropped when rq is freed.
3349 *
3350 * Each cfq queue took a reference on the parent group. Drop it now.
3351 * queue lock must be held here.
3352 */
3353static void cfq_put_queue(struct cfq_queue *cfqq)
3354{
3355        struct cfq_data *cfqd = cfqq->cfqd;
3356        struct cfq_group *cfqg;
3357
3358        BUG_ON(cfqq->ref <= 0);
3359
3360        cfqq->ref--;
3361        if (cfqq->ref)
3362                return;
3363
3364        cfq_log_cfqq(cfqd, cfqq, "put_queue");
3365        BUG_ON(rb_first(&cfqq->sort_list));
3366        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3367        cfqg = cfqq->cfqg;
3368
3369        if (unlikely(cfqd->active_queue == cfqq)) {
3370                __cfq_slice_expired(cfqd, cfqq, 0);
3371                cfq_schedule_dispatch(cfqd);
3372        }
3373
3374        BUG_ON(cfq_cfqq_on_rr(cfqq));
3375        kmem_cache_free(cfq_pool, cfqq);
3376        cfqg_put(cfqg);
3377}
3378
3379static void cfq_put_cooperator(struct cfq_queue *cfqq)
3380{
3381        struct cfq_queue *__cfqq, *next;
3382
3383        /*
3384         * If this queue was scheduled to merge with another queue, be
3385         * sure to drop the reference taken on that queue (and others in
3386         * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3387         */
3388        __cfqq = cfqq->new_cfqq;
3389        while (__cfqq) {
3390                if (__cfqq == cfqq) {
3391                        WARN(1, "cfqq->new_cfqq loop detected\n");
3392                        break;
3393                }
3394                next = __cfqq->new_cfqq;
3395                cfq_put_queue(__cfqq);
3396                __cfqq = next;
3397        }
3398}
3399
3400static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3401{
3402        if (unlikely(cfqq == cfqd->active_queue)) {
3403                __cfq_slice_expired(cfqd, cfqq, 0);
3404                cfq_schedule_dispatch(cfqd);
3405        }
3406
3407        cfq_put_cooperator(cfqq);
3408
3409        cfq_put_queue(cfqq);
3410}
3411
3412static void cfq_init_icq(struct io_cq *icq)
3413{
3414        struct cfq_io_cq *cic = icq_to_cic(icq);
3415
3416        cic->ttime.last_end_request = jiffies;
3417}
3418
3419static void cfq_exit_icq(struct io_cq *icq)
3420{
3421        struct cfq_io_cq *cic = icq_to_cic(icq);
3422        struct cfq_data *cfqd = cic_to_cfqd(cic);
3423
3424        if (cic->cfqq[BLK_RW_ASYNC]) {
3425                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3426                cic->cfqq[BLK_RW_ASYNC] = NULL;
3427        }
3428
3429        if (cic->cfqq[BLK_RW_SYNC]) {
3430                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3431                cic->cfqq[BLK_RW_SYNC] = NULL;
3432        }
3433}
3434
3435static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3436{
3437        struct task_struct *tsk = current;
3438        int ioprio_class;
3439
3440        if (!cfq_cfqq_prio_changed(cfqq))
3441                return;
3442
3443        ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3444        switch (ioprio_class) {
3445        default:
3446                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
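                /* fall through to the no-prio case */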
3447        case IOPRIO_CLASS_NONE:
3448                /*
3449                 * no prio set, inherit CPU scheduling settings
3450                 */
3451                cfqq->ioprio = task_nice_ioprio(tsk);
3452                cfqq->ioprio_class = task_nice_ioclass(tsk);
3453                break;
3454        case IOPRIO_CLASS_RT:
3455                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3456                cfqq->ioprio_class = IOPRIO_CLASS_RT;
3457                break;
3458        case IOPRIO_CLASS_BE:
3459                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3460                cfqq->ioprio_class = IOPRIO_CLASS_BE;
3461                break;
3462        case IOPRIO_CLASS_IDLE:
3463                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3464                cfqq->ioprio = 7;
3465                cfq_clear_cfqq_idle_window(cfqq);
3466                break;
3467        }
3468
3469        /*
3470         * keep track of original prio settings in case we have to temporarily
3471         * elevate the priority of this queue
3472         */
3473        cfqq->org_ioprio = cfqq->ioprio;
3474        cfq_clear_cfqq_prio_changed(cfqq);
3475}
3476
3477static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3478{
3479        int ioprio = cic->icq.ioc->ioprio;
3480        struct cfq_data *cfqd = cic_to_cfqd(cic);
3481        struct cfq_queue *cfqq;
3482
3483        /*
3484         * Check whether ioprio has changed.  The condition may trigger
3485         * spuriously on a newly created cic but there's no harm.
3486         */
3487        if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3488                return;
3489
3490        cfqq = cic->cfqq[BLK_RW_ASYNC];
3491        if (cfqq) {
3492                struct cfq_queue *new_cfqq;
3493                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3494                                         GFP_ATOMIC);
3495                if (new_cfqq) {
3496                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3497                        cfq_put_queue(cfqq);
3498                }
3499        }
3500
3501        cfqq = cic->cfqq[BLK_RW_SYNC];
3502        if (cfqq)
3503                cfq_mark_cfqq_prio_changed(cfqq);
3504
3505        cic->ioprio = ioprio;
3506}
3507
3508static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3509                          pid_t pid, bool is_sync)
3510{
3511        RB_CLEAR_NODE(&cfqq->rb_node);
3512        RB_CLEAR_NODE(&cfqq->p_node);
3513        INIT_LIST_HEAD(&cfqq->fifo);
3514
3515        cfqq->ref = 0;
3516        cfqq->cfqd = cfqd;
3517
3518        cfq_mark_cfqq_prio_changed(cfqq);
3519
3520        if (is_sync) {
3521                if (!cfq_class_idle(cfqq))
3522                        cfq_mark_cfqq_idle_window(cfqq);
3523                cfq_mark_cfqq_sync(cfqq);
3524        }
3525        cfqq->pid = pid;
3526}
3527
3528#ifdef CONFIG_CFQ_GROUP_IOSCHED
3529static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3530{
3531        struct cfq_data *cfqd = cic_to_cfqd(cic);
3532        struct cfq_queue *sync_cfqq;
3533        uint64_t id;
3534
3535        rcu_read_lock();
3536        id = bio_blkcg(bio)->id;
3537        rcu_read_unlock();
3538
3539        /*
3540         * Check whether blkcg has changed.  The condition may trigger
3541         * spuriously on a newly created cic but there's no harm.
3542         */
3543        if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3544                return;
3545
3546        sync_cfqq = cic_to_cfqq(cic, 1);
3547        if (sync_cfqq) {
3548                /*
3549                 * Drop reference to sync queue. A new sync queue will be
3550                 * assigned in new group upon arrival of a fresh request.
3551                 */
3552                cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3553                cic_set_cfqq(cic, NULL, 1);
3554                cfq_put_queue(sync_cfqq);
3555        }
3556
3557        cic->blkcg_id = id;
3558}
3559#else
3560static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3561#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3562
3563static struct cfq_queue *
3564cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3565                     struct bio *bio, gfp_t gfp_mask)
3566{
3567        struct blkcg *blkcg;
3568        struct cfq_queue *cfqq, *new_cfqq = NULL;
3569        struct cfq_group *cfqg;
3570
3571retry:
3572        rcu_read_lock();
3573
3574        blkcg = bio_blkcg(bio);
3575        cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3576        cfqq = cic_to_cfqq(cic, is_sync);
3577
3578        /*
3579         * Always try a new alloc if we fell back to the OOM cfqq
3580         * originally, since it should just be a temporary situation.
3581         */
3582        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3583                cfqq = NULL;
3584                if (new_cfqq) {
3585                        cfqq = new_cfqq;
3586                        new_cfqq = NULL;
3587                } else if (gfp_mask & __GFP_WAIT) {
3588                        rcu_read_unlock();
3589                        spin_unlock_irq(cfqd->queue->queue_lock);
3590                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
3591                                        gfp_mask | __GFP_ZERO,
3592                                        cfqd->queue->node);
3593                        spin_lock_irq(cfqd->queue->queue_lock);
3594                        if (new_cfqq)
3595                                goto retry;
3596                        else
3597                                return &cfqd->oom_cfqq;
3598                } else {
3599                        cfqq = kmem_cache_alloc_node(cfq_pool,
3600                                        gfp_mask | __GFP_ZERO,
3601                                        cfqd->queue->node);
3602                }
3603
3604                if (cfqq) {
3605                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3606                        cfq_init_prio_data(cfqq, cic);
3607                        cfq_link_cfqq_cfqg(cfqq, cfqg);
3608                        cfq_log_cfqq(cfqd, cfqq, "alloced");
3609                } else
3610                        cfqq = &cfqd->oom_cfqq;
3611        }
3612
3613        if (new_cfqq)
3614                kmem_cache_free(cfq_pool, new_cfqq);
3615
3616        rcu_read_unlock();
3617        return cfqq;
3618}
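
/*
 * Note the allocation pattern above: for __GFP_WAIT callers the queue
 * lock is dropped around kmem_cache_alloc_node() and the whole lookup
 * is retried, since the cic/cfqg association may have changed while
 * sleeping; a racing allocation is freed at the end.
 */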
3619
3620static struct cfq_queue **
3621cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3622{
3623        switch (ioprio_class) {
3624        case IOPRIO_CLASS_RT:
3625                return &cfqd->async_cfqq[0][ioprio];
3626        case IOPRIO_CLASS_NONE:
3627                ioprio = IOPRIO_NORM;
3628                /* fall through */
3629        case IOPRIO_CLASS_BE:
3630                return &cfqd->async_cfqq[1][ioprio];
3631        case IOPRIO_CLASS_IDLE:
3632                return &cfqd->async_idle_cfqq;
3633        default:
3634                BUG();
3635        }
3636}
3637
3638static struct cfq_queue *
3639cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3640              struct bio *bio, gfp_t gfp_mask)
3641{
3642        const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3643        const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3644        struct cfq_queue **async_cfqq = NULL;
3645        struct cfq_queue *cfqq = NULL;
3646
3647        if (!is_sync) {
3648                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3649                cfqq = *async_cfqq;
3650        }
3651
3652        if (!cfqq)
3653                cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3654
3655        /*
3656         * pin the queue now that it's allocated, scheduler exit will prune it
3657         */
3658        if (!is_sync && !(*async_cfqq)) {
3659                cfqq->ref++;
3660                *async_cfqq = cfqq;
3661        }
3662
3663        cfqq->ref++;
3664        return cfqq;
3665}
3666
3667static void
3668__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3669{
3670        unsigned long elapsed = jiffies - ttime->last_end_request;
3671        elapsed = min(elapsed, 2UL * slice_idle);
3672
3673        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3674        ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3675        ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3676}
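
/*
 * The updates above implement a fixed-point exponentially weighted
 * moving average with weight 7/8 and a scale factor of 256:
 * ttime_samples converges towards 256 and ttime_total towards 256 times
 * the average of the (clamped) elapsed times, so ttime_mean approaches
 * the mean think time in jiffies, rounded by the +128 term.
 */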
3677
3678static void
3679cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3680                        struct cfq_io_cq *cic)
3681{
3682        if (cfq_cfqq_sync(cfqq)) {
3683                __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3684                __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3685                        cfqd->cfq_slice_idle);
3686        }
3687#ifdef CONFIG_CFQ_GROUP_IOSCHED
3688        __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3689#endif
3690}
3691
3692static void
3693cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3694                       struct request *rq)
3695{
3696        sector_t sdist = 0;
3697        sector_t n_sec = blk_rq_sectors(rq);
3698        if (cfqq->last_request_pos) {
3699                if (cfqq->last_request_pos < blk_rq_pos(rq))
3700                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3701                else
3702                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3703        }
3704
3705        cfqq->seek_history <<= 1;
3706        if (blk_queue_nonrot(cfqd->queue))
3707                cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3708        else
3709                cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3710}
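
/*
 * seek_history thus acts as a 32-bit shift register of per-request
 * seekiness bits; CFQQ_SEEKY() flags the queue as seeky once more than
 * 4 of the last 32 requests crossed the relevant threshold (seek
 * distance on rotational devices, small request size on non-rotational
 * ones).
 */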
3711
3712/*
3713 * Disable idle window if the process thinks too long or seeks so much that
3714 * it doesn't matter
3715 */
3716static void
3717cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3718                       struct cfq_io_cq *cic)
3719{
3720        int old_idle, enable_idle;
3721
3722        /*
3723         * Don't idle for async or idle io prio class
3724         */
3725        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3726                return;
3727
3728        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3729
3730        if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3731                cfq_mark_cfqq_deep(cfqq);
3732
3733        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3734                enable_idle = 0;
3735        else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3736                 !cfqd->cfq_slice_idle ||
3737                 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3738                enable_idle = 0;
3739        else if (sample_valid(cic->ttime.ttime_samples)) {
3740                if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3741                        enable_idle = 0;
3742                else
3743                        enable_idle = 1;
3744        }
3745
3746        if (old_idle != enable_idle) {
3747                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3748                if (enable_idle)
3749                        cfq_mark_cfqq_idle_window(cfqq);
3750                else
3751                        cfq_clear_cfqq_idle_window(cfqq);
3752        }
3753}
3754
3755/*
3756 * Check if new_cfqq should preempt the currently active queue. Returns false
3757 * for no (or if we aren't sure); true will cause a preempt.
3758 */
3759static bool
3760cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3761                   struct request *rq)
3762{
3763        struct cfq_queue *cfqq;
3764
3765        cfqq = cfqd->active_queue;
3766        if (!cfqq)
3767                return false;
3768
3769        if (cfq_class_idle(new_cfqq))
3770                return false;
3771
3772        if (cfq_class_idle(cfqq))
3773                return true;
3774
3775        /*
3776         * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3777         */
3778        if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3779                return false;
3780
3781        /*
3782         * if the new request is sync, but the currently running queue is
3783         * not, let the sync request have priority.
3784         */
3785        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3786                return true;
3787
3788        if (new_cfqq->cfqg != cfqq->cfqg)
3789                return false;
3790
3791        if (cfq_slice_used(cfqq))
3792                return true;
3793
3794        /* Allow preemption only if we are idling on sync-noidle tree */
3795        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3796            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3797            new_cfqq->service_tree->count == 2 &&
3798            RB_EMPTY_ROOT(&cfqq->sort_list))
3799                return true;
3800
3801        /*
3802         * So both queues are sync. Let the new request get disk time if
3803         * it's a metadata request and the current queue is doing regular IO.
3804         */
3805        if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3806                return true;
3807
3808        /*
3809         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3810         */
3811        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3812                return true;
3813
3814        /* The active queue is empty and we don't idle on it; preempt */
3815        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3816                return true;
3817
3818        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3819                return false;
3820
3821        /*
3822         * if this request is as-good as one we would expect from the
3823         * current cfqq, let it preempt
3824         */
3825        if (cfq_rq_close(cfqd, cfqq, rq))
3826                return true;
3827
3828        return false;
3829}
3830
3831/*
3832 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3833 * let it have half of its nominal slice.
3834 */
3835static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3836{
3837        enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3838
3839        cfq_log_cfqq(cfqd, cfqq, "preempt");
3840        cfq_slice_expired(cfqd, 1);
3841
3842        /*
3843         * if the workload type changed, don't save the slice; otherwise
3844         * the preempt doesn't take effect
3845         */
3846        if (old_type != cfqq_type(cfqq))
3847                cfqq->cfqg->saved_wl_slice = 0;
3848
3849        /*
3850         * Put the new queue at the front of the current list,
3851         * so we know that it will be selected next.
3852         */
3853        BUG_ON(!cfq_cfqq_on_rr(cfqq));
3854
3855        cfq_service_tree_add(cfqd, cfqq, 1);
3856
3857        cfqq->slice_end = 0;
3858        cfq_mark_cfqq_slice_new(cfqq);
3859}
3860
3861/*
3862 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3863 * something we should do about it
3864 */
3865static void
3866cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3867                struct request *rq)
3868{
3869        struct cfq_io_cq *cic = RQ_CIC(rq);
3870
3871        cfqd->rq_queued++;
3872        if (rq->cmd_flags & REQ_PRIO)
3873                cfqq->prio_pending++;
3874
3875        cfq_update_io_thinktime(cfqd, cfqq, cic);
3876        cfq_update_io_seektime(cfqd, cfqq, rq);
3877        cfq_update_idle_window(cfqd, cfqq, cic);
3878
3879        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3880
3881        if (cfqq == cfqd->active_queue) {
3882                /*
3883                 * Remember that we saw a request from this process, but
3884                 * don't start queuing just yet. Otherwise we risk seeing lots
3885                 * of tiny requests, because we disrupt the normal plugging
3886                 * and merging. If the request is already larger than a single
3887                 * page, let it rip immediately. For that case we assume that
3888                 * merging is already done. Ditto for a busy system that
3889                 * has other work pending, don't risk delaying until the
3890                 * idle timer unplug to continue working.
3891                 */
3892                if (cfq_cfqq_wait_request(cfqq)) {
3893                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3894                            cfqd->busy_queues > 1) {
3895                                cfq_del_timer(cfqd, cfqq);
3896                                cfq_clear_cfqq_wait_request(cfqq);
3897                                __blk_run_queue(cfqd->queue);
3898                        } else {
3899                                cfqg_stats_update_idle_time(cfqq->cfqg);
3900                                cfq_mark_cfqq_must_dispatch(cfqq);
3901                        }
3902                }
3903        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3904                /*
3905                 * not the active queue - expire current slice if it is
3906                 * idle and has expired its mean thinktime, or this new queue
3907                 * has some old slice time left and is of higher priority or
3908                 * this new queue is RT and the current one is BE
3909                 */
3910                cfq_preempt_queue(cfqd, cfqq);
3911                __blk_run_queue(cfqd->queue);
3912        }
3913}
3914
3915static void cfq_insert_request(struct request_queue *q, struct request *rq)
3916{
3917        struct cfq_data *cfqd = q->elevator->elevator_data;
3918        struct cfq_queue *cfqq = RQ_CFQQ(rq);
3919
3920        cfq_log_cfqq(cfqd, cfqq, "insert_request");
3921        cfq_init_prio_data(cfqq, RQ_CIC(rq));
3922
3923        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3924        list_add_tail(&rq->queuelist, &cfqq->fifo);
3925        cfq_add_rq_rb(rq);
3926        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3927                                 rq->cmd_flags);
3928        cfq_rq_enqueued(cfqd, cfqq, rq);
3929}
3930
3931/*
3932 * Update hw_tag based on peak queue depth over 50 samples under
3933 * sufficient load.
3934 */
3935static void cfq_update_hw_tag(struct cfq_data *cfqd)
3936{
3937        struct cfq_queue *cfqq = cfqd->active_queue;
3938
3939        if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3940                cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3941
3942        if (cfqd->hw_tag == 1)
3943                return;
3944
3945        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3946            cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3947                return;
3948
3949        /*
3950         * If the active queue doesn't have enough requests and can idle,
3951         * cfq might not dispatch sufficient requests to hardware. Don't
3952         * zero hw_tag in this case.
3953         */
3954        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3955            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3956            CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3957                return;
3958
3959        if (cfqd->hw_tag_samples++ < 50)
3960                return;
3961
3962        if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3963                cfqd->hw_tag = 1;
3964        else
3965                cfqd->hw_tag = 0;
3966}
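
/*
 * In other words: after 50 qualifying samples, hw_tag is set to 1 if
 * the peak observed driver depth reached CFQ_HW_QUEUE_MIN (5) and to 0
 * otherwise. Once hw_tag is 1 it is never re-evaluated (early return
 * above), while a 0 can still flip to 1 later.
 */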
3967
3968static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3969{
3970        struct cfq_io_cq *cic = cfqd->active_cic;
3971
3972        /* If the queue already has requests, don't wait */
3973        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3974                return false;
3975
3976        /* If there are other queues in the group, don't wait */
3977        if (cfqq->cfqg->nr_cfqq > 1)
3978                return false;
3979
3980        /* the only queue in the group, but think time is big */
3981        if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3982                return false;
3983
3984        if (cfq_slice_used(cfqq))
3985                return true;
3986
3987        /* if slice left is less than think time, wait busy */
3988        if (cic && sample_valid(cic->ttime.ttime_samples)
3989            && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3990                return true;
3991
3992        /*
3993         * If the think time is less than a jiffy then ttime_mean=0 and the
3994         * check above will not be true. It might happen that the slice has not
3995         * expired yet but will expire soon (4-5 ns) during select_queue(). To
3996         * cover the case where the think time is less than a jiffy, mark the
3997         * queue wait busy if only 1 jiffy is left in the slice.
3998         */
3999        if (cfqq->slice_end - jiffies == 1)
4000                return true;
4001
4002        return false;
4003}
4004
4005static void cfq_completed_request(struct request_queue *q, struct request *rq)
4006{
4007        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4008        struct cfq_data *cfqd = cfqq->cfqd;
4009        const int sync = rq_is_sync(rq);
4010        unsigned long now;
4011
4012        now = jiffies;
4013        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4014                     !!(rq->cmd_flags & REQ_NOIDLE));
4015
4016        cfq_update_hw_tag(cfqd);
4017
4018        WARN_ON(!cfqd->rq_in_driver);
4019        WARN_ON(!cfqq->dispatched);
4020        cfqd->rq_in_driver--;
4021        cfqq->dispatched--;
4022        (RQ_CFQG(rq))->dispatched--;
4023        cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4024                                     rq_io_start_time_ns(rq), rq->cmd_flags);
4025
4026        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4027
4028        if (sync) {
4029                struct cfq_rb_root *st;
4030
4031                RQ_CIC(rq)->ttime.last_end_request = now;
4032
4033                if (cfq_cfqq_on_rr(cfqq))
4034                        st = cfqq->service_tree;
4035                else
4036                        st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4037                                        cfqq_type(cfqq));
4038
4039                st->ttime.last_end_request = now;
4040                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4041                        cfqd->last_delayed_sync = now;
4042        }
4043
4044#ifdef CONFIG_CFQ_GROUP_IOSCHED
4045        cfqq->cfqg->ttime.last_end_request = now;
4046#endif
4047
4048        /*
4049         * If this is the active queue, check if it needs to be expired,
4050         * or if we want to idle in case it has no pending requests.
4051         */
4052        if (cfqd->active_queue == cfqq) {
4053                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4054
4055                if (cfq_cfqq_slice_new(cfqq)) {
4056                        cfq_set_prio_slice(cfqd, cfqq);
4057                        cfq_clear_cfqq_slice_new(cfqq);
4058                }
4059
4060                /*
4061                 * Should we wait for the next request to come in before we
4062                 * expire the queue?
4063                 */
4064                if (cfq_should_wait_busy(cfqd, cfqq)) {
4065                        unsigned long extend_sl = cfqd->cfq_slice_idle;
4066                        if (!cfqd->cfq_slice_idle)
4067                                extend_sl = cfqd->cfq_group_idle;
4068                        cfqq->slice_end = jiffies + extend_sl;
4069                        cfq_mark_cfqq_wait_busy(cfqq);
4070                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4071                }
4072
4073                /*
4074                 * Idling is not enabled on:
4075                 * - expired queues
4076                 * - idle-priority queues
4077                 * - async queues
4078                 * - queues with still some requests queued
4079                 * - when there is a close cooperator
4080                 */
4081                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4082                        cfq_slice_expired(cfqd, 1);
4083                else if (sync && cfqq_empty &&
4084                         !cfq_close_cooperator(cfqd, cfqq)) {
4085                        cfq_arm_slice_timer(cfqd);
4086                }
4087        }
4088
4089        if (!cfqd->rq_in_driver)
4090                cfq_schedule_dispatch(cfqd);
4091}
4092
4093static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4094{
4095        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4096                cfq_mark_cfqq_must_alloc_slice(cfqq);
4097                return ELV_MQUEUE_MUST;
4098        }
4099
4100        return ELV_MQUEUE_MAY;
4101}
4102
4103static int cfq_may_queue(struct request_queue *q, int rw)
4104{
4105        struct cfq_data *cfqd = q->elevator->elevator_data;
4106        struct task_struct *tsk = current;
4107        struct cfq_io_cq *cic;
4108        struct cfq_queue *cfqq;
4109
4110        /*
4111         * don't force setup of a queue from here, as a call to may_queue
4112         * does not necessarily imply that a request actually will be queued.
4113         * so just lookup a possibly existing queue, or return 'may queue'
4114         * if that fails
4115         */
4116        cic = cfq_cic_lookup(cfqd, tsk->io_context);
4117        if (!cic)
4118                return ELV_MQUEUE_MAY;
4119
4120        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4121        if (cfqq) {
4122                cfq_init_prio_data(cfqq, cic);
4123
4124                return __cfq_may_queue(cfqq);
4125        }
4126
4127        return ELV_MQUEUE_MAY;
4128}
4129
4130/*
4131 * queue lock held here
4132 */
4133static void cfq_put_request(struct request *rq)
4134{
4135        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4136
4137        if (cfqq) {
4138                const int rw = rq_data_dir(rq);
4139
4140                BUG_ON(!cfqq->allocated[rw]);
4141                cfqq->allocated[rw]--;
4142
4143                /* Put down rq reference on cfqg */
4144                cfqg_put(RQ_CFQG(rq));
4145                rq->elv.priv[0] = NULL;
4146                rq->elv.priv[1] = NULL;
4147
4148                cfq_put_queue(cfqq);
4149        }
4150}
4151
4152static struct cfq_queue *
4153cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4154                struct cfq_queue *cfqq)
4155{
4156        cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4157        cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4158        cfq_mark_cfqq_coop(cfqq->new_cfqq);
4159        cfq_put_queue(cfqq);
4160        return cic_to_cfqq(cic, 1);
4161}
4162
4163/*
4164 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4165 * was the last process referring to said cfqq.
4166 */
4167static struct cfq_queue *
4168split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4169{
4170        if (cfqq_process_refs(cfqq) == 1) {
4171                cfqq->pid = current->pid;
4172                cfq_clear_cfqq_coop(cfqq);
4173                cfq_clear_cfqq_split_coop(cfqq);
4174                return cfqq;
4175        }
4176
4177        cic_set_cfqq(cic, NULL, 1);
4178
4179        cfq_put_cooperator(cfqq);
4180
4181        cfq_put_queue(cfqq);
4182        return NULL;
4183}
4184/*
4185 * Allocate cfq data structures associated with this request.
4186 */
4187static int
4188cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4189                gfp_t gfp_mask)
4190{
4191        struct cfq_data *cfqd = q->elevator->elevator_data;
4192        struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4193        const int rw = rq_data_dir(rq);
4194        const bool is_sync = rq_is_sync(rq);
4195        struct cfq_queue *cfqq;
4196
4197        might_sleep_if(gfp_mask & __GFP_WAIT);
4198
4199        spin_lock_irq(q->queue_lock);
4200
4201        check_ioprio_changed(cic, bio);
4202        check_blkcg_changed(cic, bio);
4203new_queue:
4204        cfqq = cic_to_cfqq(cic, is_sync);
4205        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4206                cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
4207                cic_set_cfqq(cic, cfqq, is_sync);
4208        } else {
4209                /*
4210                 * If the queue was seeky for too long, break it apart.
4211                 */
4212                if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4213                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4214                        cfqq = split_cfqq(cic, cfqq);
4215                        if (!cfqq)
4216                                goto new_queue;
4217                }
4218
4219                /*
4220                 * Check to see if this queue is scheduled to merge with
4221                 * another, closely cooperating queue.  The merging of
4222                 * queues happens here as it must be done in process context.
4223                 * The reference on new_cfqq was taken in merge_cfqqs.
4224                 */
4225                if (cfqq->new_cfqq)
4226                        cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4227        }
4228
4229        cfqq->allocated[rw]++;
4230
4231        cfqq->ref++;
4232        cfqg_get(cfqq->cfqg);
4233        rq->elv.priv[0] = cfqq;
4234        rq->elv.priv[1] = cfqq->cfqg;
4235        spin_unlock_irq(q->queue_lock);
4236        return 0;
4237}
4238
4239static void cfq_kick_queue(struct work_struct *work)
4240{
4241        struct cfq_data *cfqd =
4242                container_of(work, struct cfq_data, unplug_work);
4243        struct request_queue *q = cfqd->queue;
4244
4245        spin_lock_irq(q->queue_lock);
4246        __blk_run_queue(cfqd->queue);
4247        spin_unlock_irq(q->queue_lock);
4248}
4249
4250/*
4251 * Timer running if the active_queue is currently idling inside its time slice
4252 */
4253static void cfq_idle_slice_timer(unsigned long data)
4254{
4255        struct cfq_data *cfqd = (struct cfq_data *) data;
4256        struct cfq_queue *cfqq;
4257        unsigned long flags;
4258        int timed_out = 1;
4259
4260        cfq_log(cfqd, "idle timer fired");
4261
4262        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4263
4264        cfqq = cfqd->active_queue;
4265        if (cfqq) {
4266                timed_out = 0;
4267
4268                /*
4269                 * We saw a request before the queue expired, let it through
4270                 */
4271                if (cfq_cfqq_must_dispatch(cfqq))
4272                        goto out_kick;
4273
4274                /*
4275                 * expired
4276                 */
4277                if (cfq_slice_used(cfqq))
4278                        goto expire;
4279
4280                /*
4281                 * only expire and reinvoke the request handler if there are
4282                 * other queues with pending requests
4283                 */
4284                if (!cfqd->busy_queues)
4285                        goto out_cont;
4286
4287                /*
4288                 * not expired and it has a request pending, let it dispatch
4289                 */
4290                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4291                        goto out_kick;
4292
4293                /*
4294                 * The queue depth flag is cleared only when idling didn't succeed
4295                 */
4296                cfq_clear_cfqq_deep(cfqq);
4297        }
4298expire:
4299        cfq_slice_expired(cfqd, timed_out);
4300out_kick:
4301        cfq_schedule_dispatch(cfqd);
4302out_cont:
4303        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4304}
4305
4306static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4307{
4308        del_timer_sync(&cfqd->idle_slice_timer);
4309        cancel_work_sync(&cfqd->unplug_work);
4310}
4311
4312static void cfq_put_async_queues(struct cfq_data *cfqd)
4313{
4314        int i;
4315
4316        for (i = 0; i < IOPRIO_BE_NR; i++) {
4317                if (cfqd->async_cfqq[0][i])
4318                        cfq_put_queue(cfqd->async_cfqq[0][i]);
4319                if (cfqd->async_cfqq[1][i])
4320                        cfq_put_queue(cfqd->async_cfqq[1][i]);
4321        }
4322
4323        if (cfqd->async_idle_cfqq)
4324                cfq_put_queue(cfqd->async_idle_cfqq);
4325}
4326
4327static void cfq_exit_queue(struct elevator_queue *e)
4328{
4329        struct cfq_data *cfqd = e->elevator_data;
4330        struct request_queue *q = cfqd->queue;
4331
4332        cfq_shutdown_timer_wq(cfqd);
4333
4334        spin_lock_irq(q->queue_lock);
4335
4336        if (cfqd->active_queue)
4337                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4338
4339        cfq_put_async_queues(cfqd);
4340
4341        spin_unlock_irq(q->queue_lock);
4342
4343        cfq_shutdown_timer_wq(cfqd);
4344
4345#ifdef CONFIG_CFQ_GROUP_IOSCHED
4346        blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4347#else
4348        kfree(cfqd->root_group);
4349#endif
4350        kfree(cfqd);
4351}
4352
4353static int cfq_init_queue(struct request_queue *q)
4354{
4355        struct cfq_data *cfqd;
4356        struct blkcg_gq *blkg __maybe_unused;
4357        int i, ret;
4358
4359        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
4360        if (!cfqd)
4361                return -ENOMEM;
4362
4363        cfqd->queue = q;
4364        q->elevator->elevator_data = cfqd;
4365
4366        /* Init root service tree */
4367        cfqd->grp_service_tree = CFQ_RB_ROOT;
4368
4369        /* Init root group and prefer root group over other groups by default */
4370#ifdef CONFIG_CFQ_GROUP_IOSCHED
4371        ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4372        if (ret)
4373                goto out_free;
4374
4375        cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4376#else
4377        ret = -ENOMEM;
4378        cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4379                                        GFP_KERNEL, cfqd->queue->node);
4380        if (!cfqd->root_group)
4381                goto out_free;
4382
4383        cfq_init_cfqg_base(cfqd->root_group);
4384#endif
4385        cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4386        cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4387
4388        /*
4389         * Not strictly needed (since RB_ROOT just clears the node and we
4390         * zeroed cfqd on alloc), but better be safe in case someone decides
4391         * to add magic to the rb code
4392         */
4393        for (i = 0; i < CFQ_PRIO_LISTS; i++)
4394                cfqd->prio_trees[i] = RB_ROOT;
4395
4396        /*
4397         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4398         * Grab a permanent reference to it, so that the normal code flow
4399         * will not attempt to free it.  oom_cfqq is linked to root_group
4400         * but shouldn't hold a reference as it'll never be unlinked.  Lose
4401         * the reference from linking right away.
4402         */
4403        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4404        cfqd->oom_cfqq.ref++;
4405
4406        spin_lock_irq(q->queue_lock);
4407        cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4408        cfqg_put(cfqd->root_group);
4409        spin_unlock_irq(q->queue_lock);
4410
4411        init_timer(&cfqd->idle_slice_timer);
4412        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4413        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4414
4415        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4416
4417        cfqd->cfq_quantum = cfq_quantum;
4418        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4419        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4420        cfqd->cfq_back_max = cfq_back_max;
4421        cfqd->cfq_back_penalty = cfq_back_penalty;
4422        cfqd->cfq_slice[0] = cfq_slice_async;
4423        cfqd->cfq_slice[1] = cfq_slice_sync;
4424        cfqd->cfq_target_latency = cfq_target_latency;
4425        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4426        cfqd->cfq_slice_idle = cfq_slice_idle;
4427        cfqd->cfq_group_idle = cfq_group_idle;
4428        cfqd->cfq_latency = 1;
4429        cfqd->hw_tag = -1;
4430        /*
4431         * we optimistically start assuming sync ops weren't delayed in the
4432         * last second, in order to have a larger depth for async operations.
4433         */
4434        cfqd->last_delayed_sync = jiffies - HZ;
4435        return 0;
4436
4437out_free:
4438        kfree(cfqd);
4439        return ret;
4440}
4441
4442/*
4443 * sysfs parts below -->
4444 */
4445static ssize_t
4446cfq_var_show(unsigned int var, char *page)
4447{
4448        return sprintf(page, "%d\n", var);
4449}
4450
4451static ssize_t
4452cfq_var_store(unsigned int *var, const char *page, size_t count)
4453{
4454        char *p = (char *) page;
4455
4456        *var = simple_strtoul(p, &p, 10);
4457        return count;
4458}
4459
4460#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4461static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4462{                                                                       \
4463        struct cfq_data *cfqd = e->elevator_data;                       \
4464        unsigned int __data = __VAR;                                    \
4465        if (__CONV)                                                     \
4466                __data = jiffies_to_msecs(__data);                      \
4467        return cfq_var_show(__data, (page));                            \
4468}
4469SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4470SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4471SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4472SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4473SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4474SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4475SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4476SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4477SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4478SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4479SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4480SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4481#undef SHOW_FUNCTION
4482
4483#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4484static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4485{                                                                       \
4486        struct cfq_data *cfqd = e->elevator_data;                       \
4487        unsigned int __data;                                            \
4488        int ret = cfq_var_store(&__data, (page), count);                \
4489        if (__data < (MIN))                                             \
4490                __data = (MIN);                                         \
4491        else if (__data > (MAX))                                        \
4492                __data = (MAX);                                         \
4493        if (__CONV)                                                     \
4494                *(__PTR) = msecs_to_jiffies(__data);                    \
4495        else                                                            \
4496                *(__PTR) = __data;                                      \
4497        return ret;                                                     \
4498}
4499STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4500STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4501                UINT_MAX, 1);
4502STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4503                UINT_MAX, 1);
4504STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4505STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4506                UINT_MAX, 0);
4507STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4508STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4509STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4510STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4511STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4512                UINT_MAX, 0);
4513STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4514STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4515#undef STORE_FUNCTION
4516
4517#define CFQ_ATTR(name) \
4518        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4519
4520static struct elv_fs_entry cfq_attrs[] = {
4521        CFQ_ATTR(quantum),
4522        CFQ_ATTR(fifo_expire_sync),
4523        CFQ_ATTR(fifo_expire_async),
4524        CFQ_ATTR(back_seek_max),
4525        CFQ_ATTR(back_seek_penalty),
4526        CFQ_ATTR(slice_sync),
4527        CFQ_ATTR(slice_async),
4528        CFQ_ATTR(slice_async_rq),
4529        CFQ_ATTR(slice_idle),
4530        CFQ_ATTR(group_idle),
4531        CFQ_ATTR(low_latency),
4532        CFQ_ATTR(target_latency),
4533        __ATTR_NULL
4534};
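
/*
 * These attributes are exposed under /sys/block/<dev>/queue/iosched/
 * while cfq is the active elevator. For example, writing "16" to the
 * quantum file there ends up in cfqd->cfq_quantum via
 * cfq_quantum_store(); the __CONV entries (slice_idle etc.) accept
 * milliseconds and store jiffies.
 */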
4535
4536static struct elevator_type iosched_cfq = {
4537        .ops = {
4538                .elevator_merge_fn =            cfq_merge,
4539                .elevator_merged_fn =           cfq_merged_request,
4540                .elevator_merge_req_fn =        cfq_merged_requests,
4541                .elevator_allow_merge_fn =      cfq_allow_merge,
4542                .elevator_bio_merged_fn =       cfq_bio_merged,
4543                .elevator_dispatch_fn =         cfq_dispatch_requests,
4544                .elevator_add_req_fn =          cfq_insert_request,
4545                .elevator_activate_req_fn =     cfq_activate_request,
4546                .elevator_deactivate_req_fn =   cfq_deactivate_request,
4547                .elevator_completed_req_fn =    cfq_completed_request,
4548                .elevator_former_req_fn =       elv_rb_former_request,
4549                .elevator_latter_req_fn =       elv_rb_latter_request,
4550                .elevator_init_icq_fn =         cfq_init_icq,
4551                .elevator_exit_icq_fn =         cfq_exit_icq,
4552                .elevator_set_req_fn =          cfq_set_request,
4553                .elevator_put_req_fn =          cfq_put_request,
4554                .elevator_may_queue_fn =        cfq_may_queue,
4555                .elevator_init_fn =             cfq_init_queue,
4556                .elevator_exit_fn =             cfq_exit_queue,
4557        },
4558        .icq_size       =       sizeof(struct cfq_io_cq),
4559        .icq_align      =       __alignof__(struct cfq_io_cq),
4560        .elevator_attrs =       cfq_attrs,
4561        .elevator_name  =       "cfq",
4562        .elevator_owner =       THIS_MODULE,
4563};
4564
4565#ifdef CONFIG_CFQ_GROUP_IOSCHED
4566static struct blkcg_policy blkcg_policy_cfq = {
4567        .pd_size                = sizeof(struct cfq_group),
4568        .cftypes                = cfq_blkcg_files,
4569
4570        .pd_init_fn             = cfq_pd_init,
4571        .pd_offline_fn          = cfq_pd_offline,
4572        .pd_reset_stats_fn      = cfq_pd_reset_stats,
4573};
4574#endif
4575
4576static int __init cfq_init(void)
4577{
4578        int ret;
4579
4580        /*
4581         * could be 0 on HZ < 1000 setups
4582         */
4583        if (!cfq_slice_async)
4584                cfq_slice_async = 1;
4585        if (!cfq_slice_idle)
4586                cfq_slice_idle = 1;
4587
4588#ifdef CONFIG_CFQ_GROUP_IOSCHED
4589        if (!cfq_group_idle)
4590                cfq_group_idle = 1;
4591
4592        ret = blkcg_policy_register(&blkcg_policy_cfq);
4593        if (ret)
4594                return ret;
4595#else
4596        cfq_group_idle = 0;
4597#endif
4598
4599        ret = -ENOMEM;
4600        cfq_pool = KMEM_CACHE(cfq_queue, 0);
4601        if (!cfq_pool)
4602                goto err_pol_unreg;
4603
4604        ret = elv_register(&iosched_cfq);
4605        if (ret)
4606                goto err_free_pool;
4607
4608        return 0;
4609
4610err_free_pool:
4611        kmem_cache_destroy(cfq_pool);
4612err_pol_unreg:
4613#ifdef CONFIG_CFQ_GROUP_IOSCHED
4614        blkcg_policy_unregister(&blkcg_policy_cfq);
4615#endif
4616        return ret;
4617}
4618
4619static void __exit cfq_exit(void)
4620{
4621#ifdef CONFIG_CFQ_GROUP_IOSCHED
4622        blkcg_policy_unregister(&blkcg_policy_cfq);
4623#endif
4624        elv_unregister(&iosched_cfq);
4625        kmem_cache_destroy(cfq_pool);
4626}
4627
4628module_init(cfq_init);
4629module_exit(cfq_exit);
4630
4631MODULE_AUTHOR("Jens Axboe");
4632MODULE_LICENSE("GPL");
4633MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
4634