// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the
	 * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
	 * Its pair is the barrier in blk_mq_dispatch_rq_list(). Without it,
	 * dispatch code might not see SCHED_RESTART while a new request added
	 * to hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

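/*
 * list_sort() comparator: order requests by hardware queue so that requests
 * bound for the same hctx end up adjacent in the dispatch list.
 */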
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

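/*
 * Dispatch the leading run of requests that share the first entry's hctx:
 * cut that run off the front of @rq_list and pass it to
 * blk_mq_dispatch_rq_list() as one batch.
 */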
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has
 * to be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then batch-dispatch requests from the same hctx together.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

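/*
 * Repeatedly pull batches from the scheduler, but give up the CPU after
 * roughly one second (or when rescheduling is needed) and let a deferred
 * queue run pick up the rest, so dispatch cannot monopolize this context.
 */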
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

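/* Return the software queue after @ctx in @hctx's map, wrapping at the end. */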
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has
 * to be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

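/*
 * Returns -EAGAIN if hctx->dispatch was found non-empty, meaning the queue
 * has to be run again to avoid starving the requests parked there.
 */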
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}

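/*
 * Entry point for running a hardware queue: feed requests from the dispatch
 * list, the IO scheduler or the software queues into the driver.
 */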
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

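/*
 * Try to merge @bio into an already-queued request: use the elevator's
 * ->bio_merge() hook when a scheduler is attached, otherwise fall back to
 * merging against the per-cpu software queue.
 */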
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

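/*
 * Allocate the scheduler tag map and requests for one hardware queue, or
 * reuse the queue-wide shared scheduler tags if the tag set shares tags.
 */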
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set the initial depth at max so that we don't need to reallocate
	 * when updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to double the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * blk_mq_sched_free_rqs - free the rqs allocated by the io scheduler
 * @q: request queue
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

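/*
 * Tear down the elevator: per-hctx scheduler data and debugfs entries first,
 * then the queue-level scheduler state and the scheduler tag maps.
 */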
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}