// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

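/*
 * Look up (or create) the io_cq that links the current task's io_context to
 * the request's queue, and attach it to @rq for use by the elevator.
 */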
void blk_mq_sched_assign_ioc(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct io_context *ioc;
        struct io_cq *icq;

        /*
         * May not have an io_context if it's a passthrough request.
         */
        ioc = current->io_context;
        if (!ioc)
                return;

        spin_lock_irq(&q->queue_lock);
        icq = ioc_lookup_icq(ioc, q);
        spin_unlock_irq(&q->queue_lock);

        if (!icq) {
                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
                if (!icq)
                        return;
        }
        get_io_context(icq->ioc);
        rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. The queue is run again once
 * blk_mq_sched_restart() is called for it, typically after a request completes.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
         * in blk_mq_run_hw_queue(). Its pair is the barrier in
         * blk_mq_dispatch_rq_list(). Otherwise the dispatch side may not see
         * SCHED_RESTART while a request newly added to hctx->dispatch is
         * missed by blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}

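/*
 * list_sort() comparison callback: order requests by their hardware queue
 * pointer so that requests sharing an hctx end up adjacent in the list.
 */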
static int sched_rq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}

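/*
 * Dispatch the leading run of requests on @rq_list that share the same hctx
 * as the first request; any requests for other hctxs stay on @rq_list for
 * later passes.
 */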
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue.  Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;
        } while (++count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Requests from different hctxs may be dequeued from some
                 * schedulers, such as bfq and deadline.
                 *
                 * Sort the requests in the list according to their hctx, so
                 * requests belonging to the same hctx can be dispatched in
                 * one batch.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}

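/*
 * Keep pulling batches from the scheduler until __blk_mq_do_dispatch_sched()
 * stops making progress; return its final status (0 or -EAGAIN).
 */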
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
        } while (ret == 1);

        return ret;
}

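/*
 * Return the software queue that follows @ctx on @hctx, wrapping around to
 * the first one; used to round-robin dispatch across software queues.
 */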
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue.  Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}

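/*
 * Dispatch requests for one hardware queue: leftover requests on
 * hctx->dispatch come first, then the scheduler or the software queues,
 * depending on whether an elevator is attached and how busy the device is.
 */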
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
        int ret = 0;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first
         * for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can continue to dispatch from our own queue.
         *
         * If there are residual requests, mark the queue as needing a restart
         * and dispatch those first; only continue into the scheduler or
         * software queues when the residual list was dispatched successfully.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
                        if (has_sched_dispatch)
                                ret = blk_mq_do_dispatch_sched(hctx);
                        else
                                ret = blk_mq_do_dispatch_ctx(hctx);
                }
        } else if (has_sched_dispatch) {
                ret = blk_mq_do_dispatch_sched(hctx);
        } else if (hctx->dispatch_busy) {
                /* dequeue requests one by one from the sw queue if the queue is busy */
                ret = blk_mq_do_dispatch_ctx(hctx);
        } else {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        }

        return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}

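/*
 * Try to merge @bio into an existing request, either via the elevator's
 * ->bio_merge() hook or, without a scheduler, against the per-CPU software
 * queue lists.
 */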
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge)
                return e->type->ops.bio_merge(q, bio, nr_segs);

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                return false;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Reverse check our software queue for entries that we could
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
                ctx->rq_merged++;
                ret = true;
        }

        spin_unlock(&ctx->lock);

        return ret;
}

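/*
 * Attempt to merge @rq with a request already queued in the elevator instead
 * of inserting it; returns true if the merge succeeded.
 */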
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        /*
         * Dispatch flush and passthrough requests directly.
         *
         * A passthrough request has to be added to hctx->dispatch directly:
         * the device may be in a state where it can't handle FS requests, so
         * BLK_STS_RESOURCE is returned and FS requests pile up on
         * hctx->dispatch, while a passthrough request may be exactly what is
         * needed to recover. If the passthrough request went to the scheduler
         * queue instead, it would never get a chance to be dispatched, since
         * requests on hctx->dispatch are prioritized.
         */
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;

        return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

        if (blk_mq_sched_bypass_insert(hctx, rq)) {
                /*
                 * Requests that bypass the scheduler (flush sequence requests
                 * and passthrough requests) go straight to hctx->dispatch.
                 *
                 * Flush sequence requests are always inserted at the head of
                 * the dispatch list: there is at most one flush in flight per
                 * hardware queue, and issuing it ahead of queued normal IO
                 * avoids delaying it behind SCHED_RESTART handling and
                 * improves the chance of flush merging, which matters for
                 * flush-intensive workloads on NCQ hardware.
                 */
                at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }

        if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct elevator_queue *e;
        struct request_queue *q = hctx->queue;

        /*
         * blk_mq_sched_insert_requests() is called from flush plug
         * context only, and holds one usage counter to prevent the queue
         * from being released.
         */
        percpu_ref_get(&q->q_usage_counter);

        e = hctx->queue->elevator;
        if (e && e->type->ops.insert_requests)
                e->type->ops.insert_requests(hctx, list, false);
        else {
                /*
                 * Try to issue requests directly if the hw queue isn't busy
                 * in case of the 'none' scheduler; this may save us one extra
                 * enqueue & dequeue to the sw queue.
                 */
                if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
                        if (list_empty(list))
                                goto out;
                }
                blk_mq_insert_requests(hctx, ctx, list);
        }

        blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
        percpu_ref_put(&q->q_usage_counter);
}

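/*
 * Free one hardware queue's scheduler tag map along with the requests that
 * were allocated for it.
 */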
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

        if (hctx->sched_tags) {
                blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
                blk_mq_free_rq_map(hctx->sched_tags, flags);
                hctx->sched_tags = NULL;
        }
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        struct blk_mq_tag_set *set = q->tag_set;
        /* Clear HCTX_SHARED so the sched tags are allocated per hctx */
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
        int ret;

        hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
                                               set->reserved_tags, flags);
        if (!hctx->sched_tags)
                return -ENOMEM;

        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
        if (ret)
                blk_mq_sched_free_tags(set, hctx, hctx_idx);

        return ret;
}

/* Release the scheduler tag maps for all hardware queues. */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /* Clear HCTX_SHARED so the per-hctx sched tags are freed */
                unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

                if (hctx->sched_tags) {
                        blk_mq_free_rq_map(hctx->sched_tags, flags);
                        hctx->sched_tags = NULL;
                }
        }
}

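/*
 * Set up the I/O scheduler @e for @q: size the scheduler queue depth,
 * allocate scheduler tag maps per hardware queue, and call the elevator's
 * init hooks.
 */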
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned int i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
        }

        /*
         * Default to double of smaller one between hw queue_depth and 128,
         * since we don't split into sync/async like the old code did.
         * Additionally, this is a per-hw queue depth.
         */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_MAX_RQ);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_tags(q, hctx, i);
                if (ret)
                        goto err;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_sched_free_requests(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err:
        blk_mq_sched_free_requests(q);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
        return ret;
}

/*
 * Called from either blk_cleanup_queue() or elevator_switch(); the tag set
 * is required for freeing the requests.
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags)
                        blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
        }
}

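/*
 * Tear down the I/O scheduler for @q: run the elevator's exit hooks and free
 * the scheduler tag maps.
 */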
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
}