linux/block/blk-mq-sched.c
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

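/*
 * Look up (or create) the io_cq of the current task's io_context for this
 * queue, take a reference on the io_context and attach the icq to the
 * request, so that icq-based schedulers such as BFQ can tie the request to
 * the submitting context.
 */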
void blk_mq_sched_assign_ioc(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct io_context *ioc;
        struct io_cq *icq;

        /*
         * May not have an IO context if it's a passthrough request
         */
        ioc = current->io_context;
        if (!ioc)
                return;

        spin_lock_irq(&q->queue_lock);
        icq = ioc_lookup_icq(ioc, q);
        spin_unlock_irq(&q->queue_lock);

        if (!icq) {
                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
                if (!icq)
                        return;
        }
        get_io_context(icq->ioc);
        rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART against the
         * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
         * Its pair is the barrier in blk_mq_dispatch_rq_list(). Without it,
         * the dispatch code might not see SCHED_RESTART while a request newly
         * added to hctx->dispatch is missed by the check in
         * blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}

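/*
 * list_sort() comparator: order requests by their hardware queue pointer so
 * that requests targeting the same hctx end up adjacent in the list.
 */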
static int sched_rq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}

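/*
 * Cut the leading run of requests that share the first request's hctx out of
 * @rq_list and dispatch it, returning whether that batch made dispatch
 * progress.
 */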
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY     3               /* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue.  Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;
        } while (++count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Some schedulers, such as bfq and deadline, may dequeue
                 * requests belonging to different hctxs.
                 *
                 * Sort the requests in the list according to their hctx,
                 * then dispatch them in batches of requests from the same
                 * hctx.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}

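/*
 * Keep calling __blk_mq_do_dispatch_sched() as long as it reports that it
 * dispatched something (return value 1); stop on 0 (nothing dispatched) or
 * -EAGAIN (hctx->dispatch found non-empty) and pass that result up.
 */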
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
        } while (ret == 1);

        return ret;
}

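/* Return the software queue that follows @ctx on @hctx, wrapping around. */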
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue.  Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}

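/*
 * Returns 0 if dispatching is done for now, or -EAGAIN if hctx->dispatch
 * was found non-empty and the caller has to retry.
 */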
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
        int ret = 0;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first for
         * more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests, if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         *
         * We want to dispatch from the scheduler if there was nothing
         * on the dispatch list or we were able to dispatch from the
         * dispatch list.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
                        if (has_sched_dispatch)
                                ret = blk_mq_do_dispatch_sched(hctx);
                        else
                                ret = blk_mq_do_dispatch_ctx(hctx);
                }
        } else if (has_sched_dispatch) {
                ret = blk_mq_do_dispatch_sched(hctx);
        } else if (hctx->dispatch_busy) {
                /* dequeue request one by one from sw queue if queue is busy */
                ret = blk_mq_do_dispatch_ctx(hctx);
        } else {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        }

        return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}

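/*
 * Try to merge @bio into an already queued request: use the elevator's
 * ->bio_merge() hook if the scheduler provides one, otherwise fall back to
 * scanning the software queue this bio maps to.
 */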
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge)
                return e->type->ops.bio_merge(q, bio, nr_segs);

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                return false;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Reverse check our software queue for entries that we could
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
                ctx->rq_merged++;
                ret = true;
        }

        spin_unlock(&ctx->lock);

        return ret;
}

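/*
 * Let the elevator try to merge @rq with a request that is already queued;
 * returns true if @rq was merged into an existing request.
 */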
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        /*
         * Dispatch flush and passthrough requests directly.
         *
         * A passthrough request has to be added to hctx->dispatch directly.
         * The device may, for whatever reason, be in a state in which it
         * can't handle FS requests, so STS_RESOURCE is always returned and
         * the FS request is added to hctx->dispatch. However, a passthrough
         * request may be required at that very time to fix the problem. If
         * the passthrough request were added to the scheduler queue, there
         * would be no chance to dispatch it, since requests in
         * hctx->dispatch are prioritized.
         */
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;

        return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

        if (blk_mq_sched_bypass_insert(hctx, rq)) {
                /*
                 * Firstly, a normal IO request is inserted into the scheduler
                 * queue or the sw queue, while a flush request is added to
                 * the dispatch queue (hctx->dispatch) directly. There is at
                 * most one in-flight flush request per hw queue, so it
                 * doesn't matter whether the flush request is added to the
                 * tail or the front of the dispatch queue.
                 *
                 * Secondly, with NCQ the flush request is a non-NCQ command,
                 * and queueing it will fail while any normal IO request (NCQ
                 * command) is in flight. Adding the flush rq to the front of
                 * hctx->dispatch tends to add some latency to the flush rq
                 * (because of S_SCHED_RESTART) compared with adding it to the
                 * tail of the dispatch queue; that in turn increases the
                 * chance of flush merging, so fewer flush requests are issued
                 * to the controller. It is observed that ~10% of the time is
                 * saved in blktests block/004 on a disk attached to an
                 * AHCI/NCQ drive when the flush rq is added to the front of
                 * hctx->dispatch.
                 *
                 * So simply queue the flush rq at the front of hctx->dispatch,
                 * so that flush-intensive workloads can benefit on NCQ HW.
                 */
                at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }

        if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct elevator_queue *e;
        struct request_queue *q = hctx->queue;

        /*
         * blk_mq_sched_insert_requests() is called from flush plug
         * context only, so hold one usage counter to prevent the queue
         * from being released.
         */
        percpu_ref_get(&q->q_usage_counter);

        e = hctx->queue->elevator;
        if (e && e->type->ops.insert_requests)
                e->type->ops.insert_requests(hctx, list, false);
        else {
                /*
                 * With the 'none' scheduler, try to issue requests directly
                 * if the hw queue isn't busy; this may save us an extra
                 * enqueue & dequeue to the sw queue.
                 */
                if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
                        if (list_empty(list))
                                goto out;
                }
                blk_mq_insert_requests(hctx, ctx, list);
        }

        blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
        percpu_ref_put(&q->q_usage_counter);
}

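/* Free one hardware queue's scheduler tag map along with the requests backing it. */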
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

        if (hctx->sched_tags) {
                blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
                blk_mq_free_rq_map(hctx->sched_tags, flags);
                hctx->sched_tags = NULL;
        }
}

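/*
 * Allocate a scheduler tag map sized to q->nr_requests for one hardware
 * queue, plus the requests backing it.
 */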
static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        struct blk_mq_tag_set *set = q->tag_set;
        /* Clear HCTX_SHARED so tags are init'ed */
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
        int ret;

        hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
                                               set->reserved_tags, flags);
        if (!hctx->sched_tags)
                return -ENOMEM;

        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
        if (ret)
                blk_mq_sched_free_tags(set, hctx, hctx_idx);

        return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /* Clear HCTX_SHARED so tags are freed */
                unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

                if (hctx->sched_tags) {
                        blk_mq_free_rq_map(hctx->sched_tags, flags);
                        hctx->sched_tags = NULL;
                }
        }
}

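/*
 * Attach elevator type @e to @q: size q->nr_requests, allocate the per-hctx
 * scheduler tags, then call the elevator's ->init_sched() and per-hctx
 * ->init_hctx() hooks. A NULL @e switches the queue to "none".
 */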
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned int i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
        }

        /*
         * Default to twice the smaller of the hw queue depth and 128,
         * since we don't split into sync/async like the old code did.
         * Additionally, this is a per-hw-queue depth.
         */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_MAX_RQ);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_tags(q, hctx, i);
                if (ret)
                        goto err;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_sched_free_requests(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err:
        blk_mq_sched_free_requests(q);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
        return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags)
                        blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
        }
}

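/*
 * Tear down elevator @e on @q: run the per-hctx and queue-wide exit hooks,
 * unregister the debugfs entries and free the scheduler tag maps.
 */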
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
}