/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler. See deadline io scheduler for benchmarking.
 *
 *  Copyright (C) 2003 Jens Axboe <jaxboe@fusionio.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "cfq.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
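
/*
 * cfqq->seek_history is a 32-bit shift register of recent seek samples:
 * one bit is shifted in per request, set when that request landed more
 * than CFQQ_SEEK_THR sectors away from the previous one. CFQQ_SEEKY()
 * then flags a queue as seeky when more than 32/8 = 4 of the last 32
 * samples were long seeks.
 */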

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private[0])
#define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private[1])
#define RQ_CFQG(rq)	(struct cfq_group *) ((rq)->elevator_private[2])

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

static DEFINE_SPINLOCK(cic_index_lock);
static DEFINE_IDA(cic_index_ida);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_vdisktime = 0, }

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io priority of this queue */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately on service_tree_idle.
 */
enum wl_prio_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	bool needs_update;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc.
	 * We create the array for each prio class but at run time it is
	 * used only for RT and BE class; the IDLE slot remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root.
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_workload_slice;
	enum wl_type_t saved_workload;
	enum wl_prio_t saved_serving_prio;
	struct blkio_group blkg;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	struct hlist_node cfqd_node;
	int ref;
#endif
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_prio_t serving_prio;
	enum wl_type_t serving_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate (behave as if NCQ is present, to allow detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;

	unsigned int cic_index;
	struct list_head cic_list;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;

	/* List of cfq groups being managed on this device */
	struct hlist_head cfqg_list;
	struct rcu_head rcu;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
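
/*
 * Example expansion: CFQ_CFQQ_FNS(on_rr) generates cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * bit CFQ_CFQQ_FLAG_on_rr in cfqq->flags.
 */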

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			blkg_path(&(cfqq)->cfqg->blkg), ##args)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
				blkg_path(&(cfqg)->blkg), ##args)

#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
#endif
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses the cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL)


static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a problem. For now, however, we limit
	 * iops mode to NCQ SSDs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
					     struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

#define CIC_DEAD_KEY	1ul
#define CIC_DEAD_INDEX_SHIFT	1

static inline void *cfqd_dead_key(struct cfq_data *cfqd)
{
	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
		return NULL;

	return cfqd;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
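
/*
 * Worked example, assuming HZ=1000 so the default sync base slice is
 * cfq_slice_sync = HZ/10 = 100ms: each ioprio step is worth
 * base_slice/CFQ_SLICE_SCALE = 20ms, giving a sync queue 180ms at
 * ioprio 0, 100ms at the default ioprio 4 and 40ms at ioprio 7.
 */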

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * BLKIO_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
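
/*
 * cfq_scale_slice() converts used service time into a vdisktime charge:
 * the time is scaled by BLKIO_WEIGHT_DEFAULT/weight, so a group with
 * twice the default weight accrues vdisktime at half the rate and hence
 * receives roughly twice the disk share over time.
 */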

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
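
/*
 * The two helpers above compare vdisktimes through a signed delta rather
 * than a direct u64 comparison, so ordering stays correct if the virtual
 * clock ever wraps -- analogous to the time_before()/time_after()
 * jiffies macros.
 */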

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * Get averaged number of queues of RT/BE priority. The average is
 * updated with a formula that weights the larger of (old average,
 * current count) more heavily, so it follows sudden increases quickly
 * and decays slowly.
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
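
/*
 * With cfq_hist_divisor = 4 the update is (3*max + min + 2)/4, e.g. an
 * average of 2 meeting a burst of 10 busy queues jumps straight to
 * (30 + 2 + 2)/4 = 8, while dropping from 8 busy queues back to 2
 * decays one step per update: 7, 6, 5, ...
 */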

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
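
/*
 * Example, assuming HZ=1000 and a single group (group_slice is then the
 * full 300ms cfq_target_latency): eight default-priority sync queues
 * imply 8 * 100ms = 800ms of expected latency, so each 100ms slice is
 * compressed to 100 * 300 / 800 = 37ms, bounded below by low_slice.
 */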

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) &&
		 !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek is possible),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
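
/*
 * Example: with the head at sector 1000 and cfq_back_penalty = 2, a
 * request at sector 1100 has d = 100 while one at sector 900 (within
 * back_max) has d = (1000 - 900) * 2 = 200, so the forward request is
 * preferred even though both are equally close to the head.
 */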

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}
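
/*
 * Groups are keyed by vdisktime relative to the tree's min_vdisktime;
 * taking the difference as an s64 keeps the sort order stable across
 * wraparound of the u64 virtual clock.
 */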

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->needs_update) {
		cfqg->weight = cfqg->new_weight;
		cfqg->needs_update = false;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get a smaller vtime based on their weights, so
	 * that a group that was not continuously backlogged does not
	 * lose its whole share.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and the group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, else defer */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
					st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
			iops_mode(cfqd), cfqq->nr_sectors);
	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
					  unaccounted_sl);
	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
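
/*
 * Note the three charging modes used above: wall-clock time used (the
 * default), number of requests dispatched when in iops_mode(), and the
 * whole allocated slice for an async queue in a group with no busy sync
 * queues, so that pure-async service is not undercharged relative to
 * groups doing sync IO.
 */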

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct cfq_group, blkg);
	return NULL;
}

void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
					unsigned int weight)
{
	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
	cfqg->new_weight = weight;
	cfqg->needs_update = true;
}

static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
		struct blkio_cgroup *blkcg, int create)
{
	struct cfq_group *cfqg = NULL;
	void *key = cfqd;
	int i, j;
	struct cfq_rb_root *st;
	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
	unsigned int major, minor;

	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfqg->blkg.dev = MKDEV(major, minor);
		goto done;
	}
	if (cfqg || !create)
		goto done;

	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
	if (!cfqg)
		goto done;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * elevator which will be dropped by either elevator exit
	 * or cgroup deletion path depending on who is exiting first.
	 */
	cfqg->ref = 1;

	/*
	 * Add group onto cgroup list. It might happen that bdi->dev is
	 * not initialized yet. Initialize this new group without major
	 * and minor info and this info will be filled in once a new thread
	 * comes for IO. See code above.
	 */
	if (bdi->dev) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
					MKDEV(major, minor));
	} else
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
					0);

	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);

	/* Add group on cfqd list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);

done:
	return cfqg;
}

/*
 * Search for the cfq group current task belongs to. If create = 1, then also
 * create the cfq group if it does not exist. request_queue lock must be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
	struct blkio_cgroup *blkcg;
	struct cfq_group *cfqg = NULL;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
	if (!cfqg && create)
		cfqg = &cfqd->root_group;
	rcu_read_unlock();
	return cfqg;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	cfqg->ref++;
	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = &cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqq->cfqg->ref++;
}

static void cfq_put_cfqg(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	BUG_ON(cfqg->ref <= 0);
	cfqg->ref--;
	if (cfqg->ref)
		return;
	for_each_cfqg_st(cfqg, i, j, st)
		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
	kfree(cfqg);
}

static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

	hlist_del_init(&cfqg->cfqd_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	cfq_put_cfqg(cfqg);
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd)
{
	struct hlist_node *pos, *n;
	struct cfq_group *cfqg;

	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
			cfq_destroy_cfqg(cfqd, cfqg);
	}
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is lost. If we don't kill the group, it would
 * be left on cfqd->cfqg_list holding its reference until elevator exit.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu-protected
 * pointer, so it is a valid cfq_data pointer as long as we hold the rcu
 * read lock.
 */
void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct cfq_data *cfqd = key;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
	return &cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;
	int group_changed = 0;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the queue further back in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if ((add_front || !new_cfqq) && !group_changed)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	if (rq->cmd_flags & REQ_META) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					bio_data_dir(bio), cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(next), rq_is_sync(next));
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance.  If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obviously
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *	      closely cooperating with itself.
 *
 * So, basically we're assuming that that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to a different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
			     cic->ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}
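
/*
 * Rationale for idling: a task doing dependent synchronous reads briefly
 * has no request queued while it processes the data it just read. Waiting
 * cfq_slice_idle (8ms at HZ=1000) keeps the head in place, which on
 * rotational media is usually cheaper than seeking away to another queue
 * and back again.
 */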

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
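
/*
 * With the default cfq_slice_async_rq = 2 this caps a queue at
 * 2 * (2 + 2 * (8 - 1 - ioprio)) requests per slice: 32 at ioprio 0,
 * 16 at the default ioprio 4 and 4 at ioprio 7.
 */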

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}
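
/*
 * "Merge in the direction of the lesser amount of work" means the queue
 * with fewer process references is pointed at the busier one via
 * ->new_cfqq, and the survivor inherits the donor's process references
 * so it cannot be freed while those processes still reference it.
 */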

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues with-in same group will lead to higher
		 * async ratio system wide as generally root group is going
		 * to be higher weight group. So, just consider the async
		 * proportion system-wide.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}
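
/*
 * Example, assuming HZ=1000 and a single group (group_slice = 300ms): if
 * the chosen sync workload has 3 queues out of an average of 6 busy BE
 * queues, the workload is given 300 * 3 / 6 = 150ms before the next call
 * switches to another workload type.
 */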

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice shows that
		 * we have been idling all along on this queue and it should
		 * be ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run.  The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree.  If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
	    && cfqq->cfqg->dispatched) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}

static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * The allowed depth ramps up with the time elapsed since sync IO
	 * was last delayed, so bursty async IO cannot starve sync latency.
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}
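
/*
 * The async ramp-up above works out to one request of extra depth per
 * elapsed sync slice (100ms at HZ=1000) since the last time sync IO was
 * observed to be delayed, so async depth restarts near zero whenever
 * async traffic has recently gotten in the way of sync requests.
 */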

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_context *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}
2549
2550
2551
2552
2553
2554
2555
2556
2557static void cfq_put_queue(struct cfq_queue *cfqq)
2558{
2559 struct cfq_data *cfqd = cfqq->cfqd;
2560 struct cfq_group *cfqg;
2561
2562 BUG_ON(cfqq->ref <= 0);
2563
2564 cfqq->ref--;
2565 if (cfqq->ref)
2566 return;
2567
2568 cfq_log_cfqq(cfqd, cfqq, "put_queue");
2569 BUG_ON(rb_first(&cfqq->sort_list));
2570 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2571 cfqg = cfqq->cfqg;
2572
2573 if (unlikely(cfqd->active_queue == cfqq)) {
2574 __cfq_slice_expired(cfqd, cfqq, 0);
2575 cfq_schedule_dispatch(cfqd);
2576 }
2577
2578 BUG_ON(cfq_cfqq_on_rr(cfqq));
2579 kmem_cache_free(cfq_pool, cfqq);
2580 cfq_put_cfqg(cfqg);
2581}
2582
/*
 * Call func for each cic attached to this ioc.
 */
2586static void
2587call_for_each_cic(struct io_context *ioc,
2588 void (*func)(struct io_context *, struct cfq_io_context *))
2589{
2590 struct cfq_io_context *cic;
2591 struct hlist_node *n;
2592
2593 rcu_read_lock();
2594
2595 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2596 func(ioc, cic);
2597
2598 rcu_read_unlock();
2599}
2600
2601static void cfq_cic_free_rcu(struct rcu_head *head)
2602{
2603 struct cfq_io_context *cic;
2604
2605 cic = container_of(head, struct cfq_io_context, rcu_head);
2606
2607 kmem_cache_free(cfq_ioc_pool, cic);
2608 elv_ioc_count_dec(cfq_ioc_count);
2609
2610 if (ioc_gone) {
/*
 * CFQ scheduler is exiting, grab the exit lock and check
 * the pending io context count. If it hits zero,
 * complete ioc_gone and set it back to NULL
 */
2616 spin_lock(&ioc_gone_lock);
2617 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2618 complete(ioc_gone);
2619 ioc_gone = NULL;
2620 }
2621 spin_unlock(&ioc_gone_lock);
2622 }
2623}
2624
2625static void cfq_cic_free(struct cfq_io_context *cic)
2626{
2627 call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2628}
2629
2630static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2631{
2632 unsigned long flags;
2633 unsigned long dead_key = (unsigned long) cic->key;
2634
2635 BUG_ON(!(dead_key & CIC_DEAD_KEY));
2636
2637 spin_lock_irqsave(&ioc->lock, flags);
2638 radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2639 hlist_del_rcu(&cic->cic_list);
2640 spin_unlock_irqrestore(&ioc->lock, flags);
2641
2642 cfq_cic_free(cic);
2643}
2644
/*
 * Unlink and free every cfq io context attached to this io_context.
 * Reached via ->dtor when the io_context is released, and via ->trim
 * from elv_unregister().
 */
2650static void cfq_free_io_context(struct io_context *ioc)
2651{
/*
 * ioc->refcount is zero here, or we are called from elv_unregister(),
 * so no new cic can be linked to this ioc. Walking the current list
 * therefore visits every cic; each one is unlinked and freed via RCU
 * by cic_free_func().
 */
2658 call_for_each_cic(ioc, cic_free_func);
2659}
2660
2661static void cfq_put_cooperator(struct cfq_queue *cfqq)
2662{
2663 struct cfq_queue *__cfqq, *next;
2664
/*
 * If this queue was scheduled to merge with another queue, be
 * sure to drop the reference taken on that queue (and others in
 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
 */
2670 __cfqq = cfqq->new_cfqq;
2671 while (__cfqq) {
2672 if (__cfqq == cfqq) {
2673 WARN(1, "cfqq->new_cfqq loop detected\n");
2674 break;
2675 }
2676 next = __cfqq->new_cfqq;
2677 cfq_put_queue(__cfqq);
2678 __cfqq = next;
2679 }
2680}
2681
2682static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2683{
2684 if (unlikely(cfqq == cfqd->active_queue)) {
2685 __cfq_slice_expired(cfqd, cfqq, 0);
2686 cfq_schedule_dispatch(cfqd);
2687 }
2688
2689 cfq_put_cooperator(cfqq);
2690
2691 cfq_put_queue(cfqq);
2692}
2693
2694static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2695 struct cfq_io_context *cic)
2696{
2697 struct io_context *ioc = cic->ioc;
2698
2699 list_del_init(&cic->queue_list);
2700
/*
 * Make sure the dead mark is seen for dead queues
 */
2704 smp_wmb();
2705 cic->key = cfqd_dead_key(cfqd);
2706
2707 if (ioc->ioc_data == cic)
2708 rcu_assign_pointer(ioc->ioc_data, NULL);
2709
2710 if (cic->cfqq[BLK_RW_ASYNC]) {
2711 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2712 cic->cfqq[BLK_RW_ASYNC] = NULL;
2713 }
2714
2715 if (cic->cfqq[BLK_RW_SYNC]) {
2716 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2717 cic->cfqq[BLK_RW_SYNC] = NULL;
2718 }
2719}
2720
2721static void cfq_exit_single_io_context(struct io_context *ioc,
2722 struct cfq_io_context *cic)
2723{
2724 struct cfq_data *cfqd = cic_to_cfqd(cic);
2725
2726 if (cfqd) {
2727 struct request_queue *q = cfqd->queue;
2728 unsigned long flags;
2729
2730 spin_lock_irqsave(q->queue_lock, flags);
2731
/*
 * Ensure we get a fresh copy of the ->key to prevent
 * a race between the exiting task and the queue
 */
2736 smp_read_barrier_depends();
2737 if (cic->key == cfqd)
2738 __cfq_exit_single_io_context(cfqd, cic);
2739
2740 spin_unlock_irqrestore(q->queue_lock, flags);
2741 }
2742}
2743
/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
2748static void cfq_exit_io_context(struct io_context *ioc)
2749{
2750 call_for_each_cic(ioc, cfq_exit_single_io_context);
2751}
2752
2753static struct cfq_io_context *
2754cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2755{
2756 struct cfq_io_context *cic;
2757
2758 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2759 cfqd->queue->node);
2760 if (cic) {
2761 cic->last_end_request = jiffies;
2762 INIT_LIST_HEAD(&cic->queue_list);
2763 INIT_HLIST_NODE(&cic->cic_list);
2764 cic->dtor = cfq_free_io_context;
2765 cic->exit = cfq_exit_io_context;
2766 elv_ioc_count_inc(cfq_ioc_count);
2767 }
2768
2769 return cic;
2770}
2771
2772static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2773{
2774 struct task_struct *tsk = current;
2775 int ioprio_class;
2776
2777 if (!cfq_cfqq_prio_changed(cfqq))
2778 return;
2779
2780 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2781 switch (ioprio_class) {
2782 default:
2783 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
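/* fall through */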
2784 case IOPRIO_CLASS_NONE:
/*
 * no prio set, inherit CPU scheduling settings
 */
2788 cfqq->ioprio = task_nice_ioprio(tsk);
2789 cfqq->ioprio_class = task_nice_ioclass(tsk);
2790 break;
2791 case IOPRIO_CLASS_RT:
2792 cfqq->ioprio = task_ioprio(ioc);
2793 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2794 break;
2795 case IOPRIO_CLASS_BE:
2796 cfqq->ioprio = task_ioprio(ioc);
2797 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2798 break;
2799 case IOPRIO_CLASS_IDLE:
2800 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2801 cfqq->ioprio = 7;
2802 cfq_clear_cfqq_idle_window(cfqq);
2803 break;
2804 }
2805
/*
 * keep track of original prio settings in case we have to temporarily
 * elevate the priority of this queue
 */
2810 cfqq->org_ioprio = cfqq->ioprio;
2811 cfqq->org_ioprio_class = cfqq->ioprio_class;
2812 cfq_clear_cfqq_prio_changed(cfqq);
2813}
2814
2815static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2816{
2817 struct cfq_data *cfqd = cic_to_cfqd(cic);
2818 struct cfq_queue *cfqq;
2819 unsigned long flags;
2820
2821 if (unlikely(!cfqd))
2822 return;
2823
2824 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2825
2826 cfqq = cic->cfqq[BLK_RW_ASYNC];
2827 if (cfqq) {
2828 struct cfq_queue *new_cfqq;
2829 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2830 GFP_ATOMIC);
2831 if (new_cfqq) {
2832 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2833 cfq_put_queue(cfqq);
2834 }
2835 }
2836
2837 cfqq = cic->cfqq[BLK_RW_SYNC];
2838 if (cfqq)
2839 cfq_mark_cfqq_prio_changed(cfqq);
2840
2841 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2842}
2843
2844static void cfq_ioc_set_ioprio(struct io_context *ioc)
2845{
2846 call_for_each_cic(ioc, changed_ioprio);
2847 ioc->ioprio_changed = 0;
2848}
2849
2850static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2851 pid_t pid, bool is_sync)
2852{
2853 RB_CLEAR_NODE(&cfqq->rb_node);
2854 RB_CLEAR_NODE(&cfqq->p_node);
2855 INIT_LIST_HEAD(&cfqq->fifo);
2856
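/* the queue starts with no references; callers take their own */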
2857 cfqq->ref = 0;
2858 cfqq->cfqd = cfqd;
2859
2860 cfq_mark_cfqq_prio_changed(cfqq);
2861
2862 if (is_sync) {
2863 if (!cfq_class_idle(cfqq))
2864 cfq_mark_cfqq_idle_window(cfqq);
2865 cfq_mark_cfqq_sync(cfqq);
2866 }
2867 cfqq->pid = pid;
2868}
2869
2870#ifdef CONFIG_CFQ_GROUP_IOSCHED
2871static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2872{
2873 struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2874 struct cfq_data *cfqd = cic_to_cfqd(cic);
2875 unsigned long flags;
2876 struct request_queue *q;
2877
2878 if (unlikely(!cfqd))
2879 return;
2880
2881 q = cfqd->queue;
2882
2883 spin_lock_irqsave(q->queue_lock, flags);
2884
2885 if (sync_cfqq) {
/*
 * Drop the reference to the sync queue. A new sync queue will be
 * assigned in the new group upon arrival of a fresh request.
 */
2890 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2891 cic_set_cfqq(cic, NULL, 1);
2892 cfq_put_queue(sync_cfqq);
2893 }
2894
2895 spin_unlock_irqrestore(q->queue_lock, flags);
2896}
2897
2898static void cfq_ioc_set_cgroup(struct io_context *ioc)
2899{
2900 call_for_each_cic(ioc, changed_cgroup);
2901 ioc->cgroup_changed = 0;
2902}
2903#endif
2904
2905static struct cfq_queue *
2906cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2907 struct io_context *ioc, gfp_t gfp_mask)
2908{
2909 struct cfq_queue *cfqq, *new_cfqq = NULL;
2910 struct cfq_io_context *cic;
2911 struct cfq_group *cfqg;
2912
2913retry:
2914 cfqg = cfq_get_cfqg(cfqd, 1);
2915 cic = cfq_cic_lookup(cfqd, ioc);
2916
2917 cfqq = cic_to_cfqq(cic, is_sync);
2918
/*
 * Always try a new alloc if we fell back to the OOM cfqq
 * originally, since it should just be a temporary situation.
 */
2923 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2924 cfqq = NULL;
2925 if (new_cfqq) {
2926 cfqq = new_cfqq;
2927 new_cfqq = NULL;
2928 } else if (gfp_mask & __GFP_WAIT) {
2929 spin_unlock_irq(cfqd->queue->queue_lock);
2930 new_cfqq = kmem_cache_alloc_node(cfq_pool,
2931 gfp_mask | __GFP_ZERO,
2932 cfqd->queue->node);
2933 spin_lock_irq(cfqd->queue->queue_lock);
2934 if (new_cfqq)
2935 goto retry;
2936 } else {
2937 cfqq = kmem_cache_alloc_node(cfq_pool,
2938 gfp_mask | __GFP_ZERO,
2939 cfqd->queue->node);
2940 }
2941
2942 if (cfqq) {
2943 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2944 cfq_init_prio_data(cfqq, ioc);
2945 cfq_link_cfqq_cfqg(cfqq, cfqg);
2946 cfq_log_cfqq(cfqd, cfqq, "alloced");
2947 } else
2948 cfqq = &cfqd->oom_cfqq;
2949 }
2950
2951 if (new_cfqq)
2952 kmem_cache_free(cfq_pool, new_cfqq);
2953
2954 return cfqq;
2955}
2956
2957static struct cfq_queue **
2958cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2959{
2960 switch (ioprio_class) {
2961 case IOPRIO_CLASS_RT:
2962 return &cfqd->async_cfqq[0][ioprio];
2963 case IOPRIO_CLASS_BE:
2964 return &cfqd->async_cfqq[1][ioprio];
2965 case IOPRIO_CLASS_IDLE:
2966 return &cfqd->async_idle_cfqq;
2967 default:
2968 BUG();
2969 }
2970}
2971
2972static struct cfq_queue *
2973cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2974 gfp_t gfp_mask)
2975{
2976 const int ioprio = task_ioprio(ioc);
2977 const int ioprio_class = task_ioprio_class(ioc);
2978 struct cfq_queue **async_cfqq = NULL;
2979 struct cfq_queue *cfqq = NULL;
2980
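/* async queues are shared across tasks, keyed by ioprio class and level */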
2981 if (!is_sync) {
2982 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2983 cfqq = *async_cfqq;
2984 }
2985
2986 if (!cfqq)
2987 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2988
/*
 * pin the queue now that it's allocated, scheduler exit will prune it
 */
2992 if (!is_sync && !(*async_cfqq)) {
2993 cfqq->ref++;
2994 *async_cfqq = cfqq;
2995 }
2996
2997 cfqq->ref++;
2998 return cfqq;
2999}
3000
/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
3004static void
3005cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
3006 struct cfq_io_context *cic)
3007{
3008 unsigned long flags;
3009
3010 WARN_ON(!list_empty(&cic->queue_list));
3011 BUG_ON(cic->key != cfqd_dead_key(cfqd));
3012
3013 spin_lock_irqsave(&ioc->lock, flags);
3014
3015 BUG_ON(ioc->ioc_data == cic);
3016
3017 radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3018 hlist_del_rcu(&cic->cic_list);
3019 spin_unlock_irqrestore(&ioc->lock, flags);
3020
3021 cfq_cic_free(cic);
3022}
3023
3024static struct cfq_io_context *
3025cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3026{
3027 struct cfq_io_context *cic;
3028 unsigned long flags;
3029
3030 if (unlikely(!ioc))
3031 return NULL;
3032
3033 rcu_read_lock();
3034
/*
 * we maintain a last-hit cache, to avoid browsing over the tree
 */
3038 cic = rcu_dereference(ioc->ioc_data);
3039 if (cic && cic->key == cfqd) {
3040 rcu_read_unlock();
3041 return cic;
3042 }
3043
3044 do {
3045 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3046 rcu_read_unlock();
3047 if (!cic)
3048 break;
3049 if (unlikely(cic->key != cfqd)) {
3050 cfq_drop_dead_cic(cfqd, ioc, cic);
3051 rcu_read_lock();
3052 continue;
3053 }
3054
3055 spin_lock_irqsave(&ioc->lock, flags);
3056 rcu_assign_pointer(ioc->ioc_data, cic);
3057 spin_unlock_irqrestore(&ioc->lock, flags);
3058 break;
3059 } while (1);
3060
3061 return cic;
3062}
3063
/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
3069static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3070 struct cfq_io_context *cic, gfp_t gfp_mask)
3071{
3072 unsigned long flags;
3073 int ret;
3074
3075 ret = radix_tree_preload(gfp_mask);
3076 if (!ret) {
3077 cic->ioc = ioc;
3078 cic->key = cfqd;
3079
3080 spin_lock_irqsave(&ioc->lock, flags);
3081 ret = radix_tree_insert(&ioc->radix_root,
3082 cfqd->cic_index, cic);
3083 if (!ret)
3084 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3085 spin_unlock_irqrestore(&ioc->lock, flags);
3086
3087 radix_tree_preload_end();
3088
3089 if (!ret) {
3090 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3091 list_add(&cic->queue_list, &cfqd->cic_list);
3092 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3093 }
3094 }
3095
3096 if (ret)
3097 printk(KERN_ERR "cfq: cic link failed!\n");
3098
3099 return ret;
3100}
3101
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
3107static struct cfq_io_context *
3108cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3109{
3110 struct io_context *ioc = NULL;
3111 struct cfq_io_context *cic;
3112
3113 might_sleep_if(gfp_mask & __GFP_WAIT);
3114
3115 ioc = get_io_context(gfp_mask, cfqd->queue->node);
3116 if (!ioc)
3117 return NULL;
3118
3119 cic = cfq_cic_lookup(cfqd, ioc);
3120 if (cic)
3121 goto out;
3122
3123 cic = cfq_alloc_io_context(cfqd, gfp_mask);
3124 if (cic == NULL)
3125 goto err;
3126
3127 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3128 goto err_free;
3129
3130out:
3131 smp_read_barrier_depends();
3132 if (unlikely(ioc->ioprio_changed))
3133 cfq_ioc_set_ioprio(ioc);
3134
3135#ifdef CONFIG_CFQ_GROUP_IOSCHED
3136 if (unlikely(ioc->cgroup_changed))
3137 cfq_ioc_set_cgroup(ioc);
3138#endif
3139 return cic;
3140err_free:
3141 cfq_cic_free(cic);
3142err:
3143 put_io_context(ioc);
3144 return NULL;
3145}
3146
3147static void
3148cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3149{
3150 unsigned long elapsed = jiffies - cic->last_end_request;
3151 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3152
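/*
 * fixed-point exponential decay (scaled by 256): each completion
 * replaces one eighth of the running think-time totals
 */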
3153 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3154 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3155 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3156}
3157
3158static void
3159cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3160 struct request *rq)
3161{
3162 sector_t sdist = 0;
3163 sector_t n_sec = blk_rq_sectors(rq);
3164 if (cfqq->last_request_pos) {
3165 if (cfqq->last_request_pos < blk_rq_pos(rq))
3166 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3167 else
3168 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3169 }
3170
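/*
 * keep a 32-bit history of recent requests; a set bit records a
 * seeky (or, on non-rotational media, small) request
 */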
3171 cfqq->seek_history <<= 1;
3172 if (blk_queue_nonrot(cfqd->queue))
3173 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3174 else
3175 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3176}
3177
/*
 * Disable the idle window if the process thinks too long or seeks so
 * much that idling doesn't matter
 */
3182static void
3183cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3184 struct cfq_io_context *cic)
3185{
3186 int old_idle, enable_idle;
3187
/*
 * Don't idle for async or idle io prio class
 */
3191 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3192 return;
3193
3194 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3195
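/* a queue with many queued requests is "deep" and keeps its idle window */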
3196 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3197 cfq_mark_cfqq_deep(cfqq);
3198
3199 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3200 enable_idle = 0;
3201 else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3202 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3203 enable_idle = 0;
3204 else if (sample_valid(cic->ttime_samples)) {
3205 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3206 enable_idle = 0;
3207 else
3208 enable_idle = 1;
3209 }
3210
3211 if (old_idle != enable_idle) {
3212 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3213 if (enable_idle)
3214 cfq_mark_cfqq_idle_window(cfqq);
3215 else
3216 cfq_clear_cfqq_idle_window(cfqq);
3217 }
3218}
3219
/*
 * Check if new_cfqq should preempt the currently active queue. Return
 * true for a preempt, false for no or if we aren't sure.
 */
3224static bool
3225cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3226 struct request *rq)
3227{
3228 struct cfq_queue *cfqq;
3229
3230 cfqq = cfqd->active_queue;
3231 if (!cfqq)
3232 return false;
3233
3234 if (cfq_class_idle(new_cfqq))
3235 return false;
3236
3237 if (cfq_class_idle(cfqq))
3238 return true;
3239
/*
 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
 */
3243 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3244 return false;
3245
/*
 * if the new request is sync, but the currently running queue is
 * not, let the sync request have priority.
 */
3250 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3251 return true;
3252
3253 if (new_cfqq->cfqg != cfqq->cfqg)
3254 return false;
3255
3256 if (cfq_slice_used(cfqq))
3257 return true;
3258
/* Allow preemption only if we are idling on the sync-noidle tree */
3260 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3261 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3262 new_cfqq->service_tree->count == 2 &&
3263 RB_EMPTY_ROOT(&cfqq->sort_list))
3264 return true;
3265
/*
 * So both queues are sync. Let the new request get disk time if
 * it's a metadata request and the current queue is doing regular IO.
 */
3270 if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3271 return true;
3272
/*
 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 */
3276 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3277 return true;
3278
/* An idle queue should not be idle now for some reason */
3280 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3281 return true;
3282
3283 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3284 return false;
3285
/*
 * if this request is as-good as one we would expect from the
 * current cfqq, let it preempt
 */
3290 if (cfq_rq_close(cfqd, cfqq, rq))
3291 return true;
3292
3293 return false;
3294}
3295
/*
 * cfqq preempts the active queue: expire the current slice and queue
 * cfqq at the front of the service tree so it is selected next.
 */
3300static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3301{
3302 struct cfq_queue *old_cfqq = cfqd->active_queue;
3303
3304 cfq_log_cfqq(cfqd, cfqq, "preempt");
3305 cfq_slice_expired(cfqd, 1);
3306
/*
 * workload type is changed, don't save slice, otherwise preempt
 * doesn't happen
 */
3311 if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
3312 cfqq->cfqg->saved_workload_slice = 0;
3313
/*
 * Put the new queue at the front of the current list, so we know
 * that it will be selected next.
 */
3318 BUG_ON(!cfq_cfqq_on_rr(cfqq));
3319
3320 cfq_service_tree_add(cfqd, cfqq, 1);
3321
3322 cfqq->slice_end = 0;
3323 cfq_mark_cfqq_slice_new(cfqq);
3324}
3325
/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
3330static void
3331cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3332 struct request *rq)
3333{
3334 struct cfq_io_context *cic = RQ_CIC(rq);
3335
3336 cfqd->rq_queued++;
3337 if (rq->cmd_flags & REQ_META)
3338 cfqq->meta_pending++;
3339
3340 cfq_update_io_thinktime(cfqd, cic);
3341 cfq_update_io_seektime(cfqd, cfqq, rq);
3342 cfq_update_idle_window(cfqd, cfqq, cic);
3343
3344 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3345
3346 if (cfqq == cfqd->active_queue) {
/*
 * Remember that we saw a request from this process, but
 * don't start queuing just yet. Otherwise we risk seeing lots
 * of tiny requests, because we disrupt the normal plugging
 * and merging. If the request is already larger than a single
 * page, let it rip immediately. For that case we assume that
 * merging is already done. Ditto for a busy system that
 * has other work pending, don't risk delaying until the
 * idle timer unplug to continue working.
 */
3357 if (cfq_cfqq_wait_request(cfqq)) {
3358 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3359 cfqd->busy_queues > 1) {
3360 cfq_del_timer(cfqd, cfqq);
3361 cfq_clear_cfqq_wait_request(cfqq);
3362 __blk_run_queue(cfqd->queue);
3363 } else {
3364 cfq_blkiocg_update_idle_time_stats(
3365 &cfqq->cfqg->blkg);
3366 cfq_mark_cfqq_must_dispatch(cfqq);
3367 }
3368 }
3369 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
 * not the active queue - expire the current slice if it is
 * idle and has expired its mean thinktime, or if this new queue
 * must preempt the active queue
 */
3376 cfq_preempt_queue(cfqd, cfqq);
3377 __blk_run_queue(cfqd->queue);
3378 }
3379}
3380
3381static void cfq_insert_request(struct request_queue *q, struct request *rq)
3382{
3383 struct cfq_data *cfqd = q->elevator->elevator_data;
3384 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3385
3386 cfq_log_cfqq(cfqd, cfqq, "insert_request");
3387 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3388
3389 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3390 list_add_tail(&rq->queuelist, &cfqq->fifo);
3391 cfq_add_rq_rb(rq);
3392 cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3393 &cfqd->serving_group->blkg, rq_data_dir(rq),
3394 rq_is_sync(rq));
3395 cfq_rq_enqueued(cfqd, cfqq, rq);
3396}
3397
/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
3402static void cfq_update_hw_tag(struct cfq_data *cfqd)
3403{
3404 struct cfq_queue *cfqq = cfqd->active_queue;
3405
3406 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3407 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3408
3409 if (cfqd->hw_tag == 1)
3410 return;
3411
3412 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3413 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3414 return;
3415
/*
 * If the active queue hasn't enough requests and can idle, cfq might
 * not dispatch sufficient requests to the hardware. Don't zero hw_tag
 * in this case.
 */
3421 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3422 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3423 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3424 return;
3425
3426 if (cfqd->hw_tag_samples++ < 50)
3427 return;
3428
3429 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3430 cfqd->hw_tag = 1;
3431 else
3432 cfqd->hw_tag = 0;
3433}
3434
3435static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3436{
3437 struct cfq_io_context *cic = cfqd->active_cic;
3438
/* If the queue already has requests, don't wait */
3440 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3441 return false;
3442
/* If there are other queues in the group, don't wait */
3444 if (cfqq->cfqg->nr_cfqq > 1)
3445 return false;
3446
3447 if (cfq_slice_used(cfqq))
3448 return true;
3449
/* if slice left is less than think time, wait busy */
3451 if (cic && sample_valid(cic->ttime_samples)
3452 && (cfqq->slice_end - jiffies < cic->ttime_mean))
3453 return true;
3454
/*
 * If the think time is less than a jiffy, ttime_mean will be 0 and
 * the check above will not trigger. The slice may not have expired
 * yet but will expire imminently during select_queue(). To avoid
 * waiting a full jiffy only to expire the queue, mark it wait_busy.
 */
3462 if (cfqq->slice_end - jiffies == 1)
3463 return true;
3464
3465 return false;
3466}
3467
3468static void cfq_completed_request(struct request_queue *q, struct request *rq)
3469{
3470 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3471 struct cfq_data *cfqd = cfqq->cfqd;
3472 const int sync = rq_is_sync(rq);
3473 unsigned long now;
3474
3475 now = jiffies;
3476 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3477 !!(rq->cmd_flags & REQ_NOIDLE));
3478
3479 cfq_update_hw_tag(cfqd);
3480
3481 WARN_ON(!cfqd->rq_in_driver);
3482 WARN_ON(!cfqq->dispatched);
3483 cfqd->rq_in_driver--;
3484 cfqq->dispatched--;
3485 (RQ_CFQG(rq))->dispatched--;
3486 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3487 rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3488 rq_data_dir(rq), rq_is_sync(rq));
3489
3490 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3491
3492 if (sync) {
3493 RQ_CIC(rq)->last_end_request = now;
3494 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3495 cfqd->last_delayed_sync = now;
3496 }
3497
/*
 * If this is the active queue, check if it needs to be expired,
 * or if we want to idle in case it has no pending requests.
 */
3502 if (cfqd->active_queue == cfqq) {
3503 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3504
3505 if (cfq_cfqq_slice_new(cfqq)) {
3506 cfq_set_prio_slice(cfqd, cfqq);
3507 cfq_clear_cfqq_slice_new(cfqq);
3508 }
3509
/*
 * Should we wait for the next request to come in before we expire
 * the queue?
 */
3514 if (cfq_should_wait_busy(cfqd, cfqq)) {
3515 unsigned long extend_sl = cfqd->cfq_slice_idle;
3516 if (!cfqd->cfq_slice_idle)
3517 extend_sl = cfqd->cfq_group_idle;
3518 cfqq->slice_end = jiffies + extend_sl;
3519 cfq_mark_cfqq_wait_busy(cfqq);
3520 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3521 }
3522
/*
 * Idling is not enabled on:
 * - expired queues
 * - idle-priority queues
 * - async queues
 * - queues with still some requests queued
 * - when there is a close cooperator
 */
3531 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3532 cfq_slice_expired(cfqd, 1);
3533 else if (sync && cfqq_empty &&
3534 !cfq_close_cooperator(cfqd, cfqq)) {
3535 cfq_arm_slice_timer(cfqd);
3536 }
3537 }
3538
3539 if (!cfqd->rq_in_driver)
3540 cfq_schedule_dispatch(cfqd);
3541}
3542
/*
 * we temporarily boost lower priority queues if they are holding fs
 * exclusive resources. they are boosted to normal prio (CLASS_BE/4)
 */
3547static void cfq_prio_boost(struct cfq_queue *cfqq)
3548{
3549 if (has_fs_excl()) {
/*
 * boost idle prio on transactions that would lock out other
 * users of the filesystem
 */
3554 if (cfq_class_idle(cfqq))
3555 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3556 if (cfqq->ioprio > IOPRIO_NORM)
3557 cfqq->ioprio = IOPRIO_NORM;
3558 } else {
/*
 * unboost the queue (if needed)
 */
3562 cfqq->ioprio_class = cfqq->org_ioprio_class;
3563 cfqq->ioprio = cfqq->org_ioprio;
3564 }
3565}
3566
3567static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3568{
3569 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3570 cfq_mark_cfqq_must_alloc_slice(cfqq);
3571 return ELV_MQUEUE_MUST;
3572 }
3573
3574 return ELV_MQUEUE_MAY;
3575}
3576
3577static int cfq_may_queue(struct request_queue *q, int rw)
3578{
3579 struct cfq_data *cfqd = q->elevator->elevator_data;
3580 struct task_struct *tsk = current;
3581 struct cfq_io_context *cic;
3582 struct cfq_queue *cfqq;
3583
/*
 * don't force setup of a queue from here, as a call to may_queue
 * does not necessarily imply that a request actually will be queued.
 * so just lookup a possibly existing queue, or return 'may queue'
 * if that fails
 */
3590 cic = cfq_cic_lookup(cfqd, tsk->io_context);
3591 if (!cic)
3592 return ELV_MQUEUE_MAY;
3593
3594 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3595 if (cfqq) {
3596 cfq_init_prio_data(cfqq, cic->ioc);
3597 cfq_prio_boost(cfqq);
3598
3599 return __cfq_may_queue(cfqq);
3600 }
3601
3602 return ELV_MQUEUE_MAY;
3603}
3604
/*
 * queue lock held here
 */
3608static void cfq_put_request(struct request *rq)
3609{
3610 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3611
3612 if (cfqq) {
3613 const int rw = rq_data_dir(rq);
3614
3615 BUG_ON(!cfqq->allocated[rw]);
3616 cfqq->allocated[rw]--;
3617
3618 put_io_context(RQ_CIC(rq)->ioc);
3619
3620 rq->elevator_private[0] = NULL;
3621 rq->elevator_private[1] = NULL;
3622
/* Put down rq reference on cfqg */
3624 cfq_put_cfqg(RQ_CFQG(rq));
3625 rq->elevator_private[2] = NULL;
3626
3627 cfq_put_queue(cfqq);
3628 }
3629}
3630
3631static struct cfq_queue *
3632cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3633 struct cfq_queue *cfqq)
3634{
3635 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3636 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3637 cfq_mark_cfqq_coop(cfqq->new_cfqq);
3638 cfq_put_queue(cfqq);
3639 return cic_to_cfqq(cic, 1);
3640}
3641
/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
3646static struct cfq_queue *
3647split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3648{
3649 if (cfqq_process_refs(cfqq) == 1) {
3650 cfqq->pid = current->pid;
3651 cfq_clear_cfqq_coop(cfqq);
3652 cfq_clear_cfqq_split_coop(cfqq);
3653 return cfqq;
3654 }
3655
3656 cic_set_cfqq(cic, NULL, 1);
3657
3658 cfq_put_cooperator(cfqq);
3659
3660 cfq_put_queue(cfqq);
3661 return NULL;
3662}
3663
/*
 * Allocate cfq data structures associated with this request.
 */
3666static int
3667cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3668{
3669 struct cfq_data *cfqd = q->elevator->elevator_data;
3670 struct cfq_io_context *cic;
3671 const int rw = rq_data_dir(rq);
3672 const bool is_sync = rq_is_sync(rq);
3673 struct cfq_queue *cfqq;
3674 unsigned long flags;
3675
3676 might_sleep_if(gfp_mask & __GFP_WAIT);
3677
3678 cic = cfq_get_io_context(cfqd, gfp_mask);
3679
3680 spin_lock_irqsave(q->queue_lock, flags);
3681
3682 if (!cic)
3683 goto queue_fail;
3684
3685new_queue:
3686 cfqq = cic_to_cfqq(cic, is_sync);
3687 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3688 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3689 cic_set_cfqq(cic, cfqq, is_sync);
3690 } else {
/*
 * If the queue was seeky for too long, break it apart.
 */
3694 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3695 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3696 cfqq = split_cfqq(cic, cfqq);
3697 if (!cfqq)
3698 goto new_queue;
3699 }
3700
/*
 * Check to see if this queue is scheduled to merge with
 * another closely cooperating queue. The merging of queues
 * happens on-demand, so this is the only place where a queue
 * is checked for a pending merge, and it is checked again
 * after the merge completes.
 */
3707 if (cfqq->new_cfqq)
3708 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3709 }
3710
3711 cfqq->allocated[rw]++;
3712
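/* the request pins the queue, the io context and the group below */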
3713 cfqq->ref++;
3714 rq->elevator_private[0] = cic;
3715 rq->elevator_private[1] = cfqq;
3716 rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
3717 spin_unlock_irqrestore(q->queue_lock, flags);
3718 return 0;
3719
3720queue_fail:
3721 if (cic)
3722 put_io_context(cic->ioc);
3723
3724 cfq_schedule_dispatch(cfqd);
3725 spin_unlock_irqrestore(q->queue_lock, flags);
3726 cfq_log(cfqd, "set_request fail");
3727 return 1;
3728}
3729
3730static void cfq_kick_queue(struct work_struct *work)
3731{
3732 struct cfq_data *cfqd =
3733 container_of(work, struct cfq_data, unplug_work);
3734 struct request_queue *q = cfqd->queue;
3735
3736 spin_lock_irq(q->queue_lock);
3737 __blk_run_queue(cfqd->queue);
3738 spin_unlock_irq(q->queue_lock);
3739}
3740
/*
 * Timer running if the active_queue is currently idling inside its slice
 */
3744static void cfq_idle_slice_timer(unsigned long data)
3745{
3746 struct cfq_data *cfqd = (struct cfq_data *) data;
3747 struct cfq_queue *cfqq;
3748 unsigned long flags;
3749 int timed_out = 1;
3750
3751 cfq_log(cfqd, "idle timer fired");
3752
3753 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3754
3755 cfqq = cfqd->active_queue;
3756 if (cfqq) {
3757 timed_out = 0;
3758
/*
 * We saw a request before the queue expired, let it through
 */
3762 if (cfq_cfqq_must_dispatch(cfqq))
3763 goto out_kick;
3764
/*
 * the slice has expired
 */
3768 if (cfq_slice_used(cfqq))
3769 goto expire;
3770
/*
 * only expire and reinvoke the request handler if there are
 * other queues with pending requests
 */
3775 if (!cfqd->busy_queues)
3776 goto out_cont;
3777
/*
 * not expired and it has a request pending, let it dispatch
 */
3781 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3782 goto out_kick;
3783
/*
 * The queue-depth flag is reset only when idling didn't succeed
 */
3787 cfq_clear_cfqq_deep(cfqq);
3788 }
3789expire:
3790 cfq_slice_expired(cfqd, timed_out);
3791out_kick:
3792 cfq_schedule_dispatch(cfqd);
3793out_cont:
3794 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3795}
3796
3797static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3798{
3799 del_timer_sync(&cfqd->idle_slice_timer);
3800 cancel_work_sync(&cfqd->unplug_work);
3801}
3802
3803static void cfq_put_async_queues(struct cfq_data *cfqd)
3804{
3805 int i;
3806
3807 for (i = 0; i < IOPRIO_BE_NR; i++) {
3808 if (cfqd->async_cfqq[0][i])
3809 cfq_put_queue(cfqd->async_cfqq[0][i]);
3810 if (cfqd->async_cfqq[1][i])
3811 cfq_put_queue(cfqd->async_cfqq[1][i]);
3812 }
3813
3814 if (cfqd->async_idle_cfqq)
3815 cfq_put_queue(cfqd->async_idle_cfqq);
3816}
3817
3818static void cfq_cfqd_free(struct rcu_head *head)
3819{
3820 kfree(container_of(head, struct cfq_data, rcu));
3821}
3822
3823static void cfq_exit_queue(struct elevator_queue *e)
3824{
3825 struct cfq_data *cfqd = e->elevator_data;
3826 struct request_queue *q = cfqd->queue;
3827
3828 cfq_shutdown_timer_wq(cfqd);
3829
3830 spin_lock_irq(q->queue_lock);
3831
3832 if (cfqd->active_queue)
3833 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3834
3835 while (!list_empty(&cfqd->cic_list)) {
3836 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3837 struct cfq_io_context,
3838 queue_list);
3839
3840 __cfq_exit_single_io_context(cfqd, cic);
3841 }
3842
3843 cfq_put_async_queues(cfqd);
3844 cfq_release_cfq_groups(cfqd);
3845 cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3846
3847 spin_unlock_irq(q->queue_lock);
3848
3849 cfq_shutdown_timer_wq(cfqd);
3850
3851 spin_lock(&cic_index_lock);
3852 ida_remove(&cic_index_ida, cfqd->cic_index);
3853 spin_unlock(&cic_index_lock);
3854
/* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3856 call_rcu(&cfqd->rcu, cfq_cfqd_free);
3857}
3858
3859static int cfq_alloc_cic_index(void)
3860{
3861 int index, error;
3862
3863 do {
3864 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3865 return -ENOMEM;
3866
3867 spin_lock(&cic_index_lock);
3868 error = ida_get_new(&cic_index_ida, &index);
3869 spin_unlock(&cic_index_lock);
3870 if (error && error != -EAGAIN)
3871 return error;
3872 } while (error);
3873
3874 return index;
3875}
3876
3877static void *cfq_init_queue(struct request_queue *q)
3878{
3879 struct cfq_data *cfqd;
3880 int i, j;
3881 struct cfq_group *cfqg;
3882 struct cfq_rb_root *st;
3883
3884 i = cfq_alloc_cic_index();
3885 if (i < 0)
3886 return NULL;
3887
3888 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3889 if (!cfqd)
3890 return NULL;
3891
/*
 * Don't need to take the queue_lock in this routine, since we are
 * initializing the ioscheduler and nobody else is using cfqd
 */
3896 cfqd->cic_index = i;
3897
/* Init root service tree */
3899 cfqd->grp_service_tree = CFQ_RB_ROOT;
3900
/* Init root group */
3902 cfqg = &cfqd->root_group;
3903 for_each_cfqg_st(cfqg, i, j, st)
3904 *st = CFQ_RB_ROOT;
3905 RB_CLEAR_NODE(&cfqg->rb_node);
3906
/* Give preference to root group over other groups */
3908 cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3909
3910#ifdef CONFIG_CFQ_GROUP_IOSCHED
/*
 * Take a reference to the root group which we never drop. This is just
 * to make sure that cfq_put_cfqg() does not try to kfree the root group
 */
3915 cfqg->ref = 1;
3916 rcu_read_lock();
3917 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3918 (void *)cfqd, 0);
3919 rcu_read_unlock();
3920#endif
3921
/*
 * Not strictly needed (since RB_ROOT just clears the node and we
 * zeroed cfqd on alloc), but better be safe in case someone decides
 * to add magic to the rb code
 */
3926 for (i = 0; i < CFQ_PRIO_LISTS; i++)
3927 cfqd->prio_trees[i] = RB_ROOT;
3928
/*
 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
 * Grab a permanent reference to it, so that the normal code flow
 * will not attempt to free it.
 */
3934 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3935 cfqd->oom_cfqq.ref++;
3936 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3937
3938 INIT_LIST_HEAD(&cfqd->cic_list);
3939
3940 cfqd->queue = q;
3941
3942 init_timer(&cfqd->idle_slice_timer);
3943 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3944 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3945
3946 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3947
3948 cfqd->cfq_quantum = cfq_quantum;
3949 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3950 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3951 cfqd->cfq_back_max = cfq_back_max;
3952 cfqd->cfq_back_penalty = cfq_back_penalty;
3953 cfqd->cfq_slice[0] = cfq_slice_async;
3954 cfqd->cfq_slice[1] = cfq_slice_sync;
3955 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3956 cfqd->cfq_slice_idle = cfq_slice_idle;
3957 cfqd->cfq_group_idle = cfq_group_idle;
3958 cfqd->cfq_latency = 1;
3959 cfqd->hw_tag = -1;
3960
/*
 * we optimistically start assuming sync ops weren't delayed in the
 * last second, in order to have larger depth for async operations.
 */
3964 cfqd->last_delayed_sync = jiffies - HZ;
3965 return cfqd;
3966}
3967
3968static void cfq_slab_kill(void)
3969{
/*
 * the caller already ensured that pending RCU callbacks are completed,
 * so we should have no busy allocations at this point.
 */
3974 if (cfq_pool)
3975 kmem_cache_destroy(cfq_pool);
3976 if (cfq_ioc_pool)
3977 kmem_cache_destroy(cfq_ioc_pool);
3978}
3979
3980static int __init cfq_slab_setup(void)
3981{
3982 cfq_pool = KMEM_CACHE(cfq_queue, 0);
3983 if (!cfq_pool)
3984 goto fail;
3985
3986 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3987 if (!cfq_ioc_pool)
3988 goto fail;
3989
3990 return 0;
3991fail:
3992 cfq_slab_kill();
3993 return -ENOMEM;
3994}
3995
/*
 * sysfs parts below -->
 */
3999static ssize_t
4000cfq_var_show(unsigned int var, char *page)
4001{
4002 return sprintf(page, "%d\n", var);
4003}
4004
4005static ssize_t
4006cfq_var_store(unsigned int *var, const char *page, size_t count)
4007{
4008 char *p = (char *) page;
4009
4010 *var = simple_strtoul(p, &p, 10);
4011 return count;
4012}
4013
4014#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4015static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4016{ \
4017 struct cfq_data *cfqd = e->elevator_data; \
4018 unsigned int __data = __VAR; \
4019 if (__CONV) \
4020 __data = jiffies_to_msecs(__data); \
4021 return cfq_var_show(__data, (page)); \
4022}
4023SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4024SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4025SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4026SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4027SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4028SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4029SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4030SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4031SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4032SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4033SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4034#undef SHOW_FUNCTION
4035
4036#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4037static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4038{ \
4039 struct cfq_data *cfqd = e->elevator_data; \
4040 unsigned int __data; \
4041 int ret = cfq_var_store(&__data, (page), count); \
4042 if (__data < (MIN)) \
4043 __data = (MIN); \
4044 else if (__data > (MAX)) \
4045 __data = (MAX); \
4046 if (__CONV) \
4047 *(__PTR) = msecs_to_jiffies(__data); \
4048 else \
4049 *(__PTR) = __data; \
4050 return ret; \
4051}
4052STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4053STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4054 UINT_MAX, 1);
4055STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4056 UINT_MAX, 1);
4057STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4058STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4059 UINT_MAX, 0);
4060STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4061STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4062STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4063STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4064STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4065 UINT_MAX, 0);
4066STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4067#undef STORE_FUNCTION
4068
4069#define CFQ_ATTR(name) \
4070 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4071
4072static struct elv_fs_entry cfq_attrs[] = {
4073 CFQ_ATTR(quantum),
4074 CFQ_ATTR(fifo_expire_sync),
4075 CFQ_ATTR(fifo_expire_async),
4076 CFQ_ATTR(back_seek_max),
4077 CFQ_ATTR(back_seek_penalty),
4078 CFQ_ATTR(slice_sync),
4079 CFQ_ATTR(slice_async),
4080 CFQ_ATTR(slice_async_rq),
4081 CFQ_ATTR(slice_idle),
4082 CFQ_ATTR(group_idle),
4083 CFQ_ATTR(low_latency),
4084 __ATTR_NULL
4085};
4086
4087static struct elevator_type iosched_cfq = {
4088 .ops = {
4089 .elevator_merge_fn = cfq_merge,
4090 .elevator_merged_fn = cfq_merged_request,
4091 .elevator_merge_req_fn = cfq_merged_requests,
4092 .elevator_allow_merge_fn = cfq_allow_merge,
4093 .elevator_bio_merged_fn = cfq_bio_merged,
4094 .elevator_dispatch_fn = cfq_dispatch_requests,
4095 .elevator_add_req_fn = cfq_insert_request,
4096 .elevator_activate_req_fn = cfq_activate_request,
4097 .elevator_deactivate_req_fn = cfq_deactivate_request,
4098 .elevator_completed_req_fn = cfq_completed_request,
4099 .elevator_former_req_fn = elv_rb_former_request,
4100 .elevator_latter_req_fn = elv_rb_latter_request,
4101 .elevator_set_req_fn = cfq_set_request,
4102 .elevator_put_req_fn = cfq_put_request,
4103 .elevator_may_queue_fn = cfq_may_queue,
4104 .elevator_init_fn = cfq_init_queue,
4105 .elevator_exit_fn = cfq_exit_queue,
4106 .trim = cfq_free_io_context,
4107 },
4108 .elevator_attrs = cfq_attrs,
4109 .elevator_name = "cfq",
4110 .elevator_owner = THIS_MODULE,
4111};
4112
4113#ifdef CONFIG_CFQ_GROUP_IOSCHED
4114static struct blkio_policy_type blkio_policy_cfq = {
4115 .ops = {
4116 .blkio_unlink_group_fn = cfq_unlink_blkio_group,
4117 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4118 },
4119 .plid = BLKIO_POLICY_PROP,
4120};
4121#else
4122static struct blkio_policy_type blkio_policy_cfq;
4123#endif
4124
4125static int __init cfq_init(void)
4126{
/*
 * could be 0 on HZ < 1000 setups
 */
4130 if (!cfq_slice_async)
4131 cfq_slice_async = 1;
4132 if (!cfq_slice_idle)
4133 cfq_slice_idle = 1;
4134
4135#ifdef CONFIG_CFQ_GROUP_IOSCHED
4136 if (!cfq_group_idle)
4137 cfq_group_idle = 1;
4138#else
4139 cfq_group_idle = 0;
4140#endif
4141 if (cfq_slab_setup())
4142 return -ENOMEM;
4143
4144 elv_register(&iosched_cfq);
4145 blkio_policy_register(&blkio_policy_cfq);
4146
4147 return 0;
4148}
4149
4150static void __exit cfq_exit(void)
4151{
4152 DECLARE_COMPLETION_ONSTACK(all_gone);
4153 blkio_policy_unregister(&blkio_policy_cfq);
4154 elv_unregister(&iosched_cfq);
4155 ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
4157 smp_wmb();
4158
/*
 * this also protects us from entering cfq_slab_kill() with
 * pending RCU callbacks
 */
4163 if (elv_ioc_count_read(cfq_ioc_count))
4164 wait_for_completion(&all_gone);
4165 ida_destroy(&cic_index_ida);
4166 cfq_slab_kill();
4167}
4168
4169module_init(cfq_init);
4170module_exit(cfq_exit);
4171
4172MODULE_AUTHOR("Jens Axboe");
4173MODULE_LICENSE("GPL");
4174MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
4175