// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */

/*
 * Time after which a lower priority request is dispatched even if higher
 * priority requests are still pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one by the above parameters; for throughput */


enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Position of the most recently dispatched request. */
	sector_t latest_pos[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of the most recently dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

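/*
 * Return the sector-sorted RB tree (one per data direction) in which @rq is
 * kept for the priority level that owns @per_prio.
 */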
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Return the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * Return the request before @rq in sector-sorted order, or NULL if there is
 * none.
 */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * Return the request after @rq in sector-sorted order, or NULL if there is
 * none.
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
 * return the first request after the start of the zone containing @pos.
 */
static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
				enum dd_data_dir data_dir, sector_t pos)
{
	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
	struct request *rq, *res = NULL;

	if (!node)
		return NULL;

	rq = rb_entry_rq(node);

	/*
	 * A zoned write may have been requeued with a starting position that
	 * is below that of the most recently dispatched request. Hence, for
	 * zoned writes, start searching from the start of a zone.
	 */
	if (blk_rq_is_seq_zoned_write(rq))
		pos = round_down(pos, rq->q->limits.chunk_sectors);

	while (node) {
		rq = rb_entry_rq(node);
		if (blk_rq_pos(rq) >= pos) {
			res = rq;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return res;
}

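/*
 * Add @rq to the sector-sorted RB tree of its priority level and data
 * direction so that dispatch can pick requests in LBA order.
 */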
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}

/*
 * deadline_check_fifo returns true if and only if there are expired requests
 * in the FIFO list. Requires !list_empty(&dd->fifo_list[data_dir]).
 */
static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
				       enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
}

/*
 * Check if rq has a sequential request preceding it.
 */
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;

	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}

/*
 * Skip all write requests that are sequential from @rq, even if we cross
 * a zone boundary.
 */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);

	do {
		pos += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	} while (rq && blk_rq_pos(rq) == pos);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq, *rb_rq, *next;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
				 queuelist) {
		/* Check whether a prior request exists for the same zone. */
		rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
		if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
			rq = rb_rq;
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_write(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = deadline_from_pos(per_prio, data_dir,
			       per_prio->latest_pos[data_dir]);
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * latest_start is the start time of the request that was dispatched most
 * recently.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}

/*
 * __dd_dispatch_request selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		data_dir = rq_data_dir(rq);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch) {
		/* we have a next request and are still entitled to batch */
		data_dir = rq_data_dir(rq);
		goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags.sb.shift;

	dd->async_depth = max(1U, 3 * (1U << shift) / 4);

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	/* We dispatch from request queue wide instead of hw queue */
	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      blk_insert_t flags, struct list_head *free)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, free))
		return;

	trace_block_rq_insert(rq);

	if (flags & BLK_MQ_INSERT_AT_HEAD) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		struct list_head *insert_before;

		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		insert_before = &per_prio->fifo_list[data_dir];
#ifdef CONFIG_BLK_DEV_ZONED
		/*
		 * Insert zoned writes such that requests are sorted by
		 * position per zone.
		 */
		if (blk_rq_is_seq_zoned_write(rq)) {
			struct request *rq2 = deadline_latter_request(rq);

			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
				insert_before = &rq2->queuelist;
		}
#endif
		list_add_tail(&rq->queuelist, insert_before);
	}
}

/*
 * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list,
			       blk_insert_t flags)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	LIST_HEAD(free);

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, flags, &free);
	}
	spin_unlock(&dd->lock);

	blk_mq_free_requests(&free);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

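/*
 * Return true if any per-priority write FIFO is non-empty. Used on request
 * completion for zoned devices to decide whether the hctx must be restarted.
 */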
static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of completed write
 * requests. Do this while holding the zone_lock spinlock so that the zone is
 * never unlocked while deadline_fifo_request() or deadline_next_request() are
 * executing. This function is called for all requests, whether or not these
 * requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. In this
 * case, make sure that a request is dispatched once a zone of a zoned block
 * device is unlocked: if write requests are still queued, mark the hctx for
 * a restart so that I/O processing is not stalled by the completion of the
 * request that is about to be unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}

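/*
 * Report whether a priority level still has work: requests on its dispatch
 * list or on either FIFO. Checked locklessly, hence list_empty_careful().
 */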
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};
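
/*
 * Example (illustrative): when mq-deadline is the active scheduler for a
 * device, the attributes above are exposed under
 * /sys/block/<disk>/queue/iosched/, so the write expiry could be raised to
 * two seconds with e.g.
 *	echo 2000 > /sys/block/sda/queue/iosched/write_expire
 * ("sda" is only an example device name). The *_expire attributes take
 * milliseconds and are converted to jiffies by the STORE_JIFFIES() macro
 * above; the remaining attributes are plain integers.
 */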

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq;						\
									\
	rq = deadline_from_pos(per_prio, data_dir,			\
			       per_prio->latest_pos[data_dir]);		\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
			.seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");