// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
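
/*
 * Example: a request's hash key is its end sector, so a back-merge
 * candidate for a bio is found by hashing the bio's start sector.  A
 * request covering sectors 1024..1031 (blk_rq_pos() == 1024,
 * blk_rq_sectors() == 8) hashes under key 1032; elv_merge() looks it up
 * with elv_rqhash_find(q, bio->bi_iter.bi_sector), so a bio starting at
 * sector 1032 finds that request as an ELEVATOR_BACK_MERGE candidate.
 */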

/*
 * Query the io scheduler to see if a bio issued by the current process may
 * be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(unsigned int elv_features,
                                        unsigned int required_features)
{
        return (required_features & elv_features) == required_features;
}
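
/*
 * Example: the check above requires every bit of required_features to be
 * set in elv_features.  A zoned device that sets
 * q->required_elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE only matches
 * elevators whose elevator_features include that bit (as mq-deadline does);
 * an elevator advertising no features matches only queues that require
 * none.
 */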

/**
 * elevator_match - Test an elevator name and features
 * @e: Scheduler to test
 * @name: Elevator name to test
 * @required_features: Features that the elevator must provide
 *
 * Return true if the elevator @e name matches @name and if @e provides all
 * the features specified by @required_features.
 */
static bool elevator_match(const struct elevator_type *e, const char *name,
                           unsigned int required_features)
{
        if (!elv_support_features(e->elevator_features, required_features))
                return false;
        if (!strcmp(e->elevator_name, name))
                return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
                return true;

        return false;
}

/**
 * elevator_find - Find an elevator
 * @name: Name of the elevator to find
 * @required_features: Features that the elevator must provide
 *
 * Return the first registered scheduler with name @name that supports all
 * the features in @required_features, or NULL if none is found.
 */
static struct elevator_type *elevator_find(const char *name,
                                           unsigned int required_features)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (elevator_match(e, name, required_features))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
                                          const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name, q->required_elevator_features);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name, q->required_elevator_features);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);
        return e;
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
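
/*
 * A minimal sketch (hypothetical scheduler code, not part of this file) of
 * how an io scheduler typically uses the helpers above to keep its requests
 * sorted by sector for front-merge lookups and dispatch ordering:
 *
 *      static struct rb_root sort_list = RB_ROOT;
 *
 *      static void example_add_request(struct request *rq)
 *      {
 *              elv_rb_add(&sort_list, rq);        // insert in sector order
 *      }
 *
 *      static struct request *example_find_front_merge(struct bio *bio)
 *      {
 *              // a request starting right after the bio would front-merge
 *              return elv_rb_find(&sort_list, bio_end_sector(bio));
 *      }
 *
 *      static void example_remove_request(struct request *rq)
 *      {
 *              elv_rb_del(&sort_list, rq);
 *      }
 */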

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.request_merge)
                return e->type->ops.request_merge(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
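
/*
 * The three levels above correspond to the per-queue "nomerges" sysfs
 * attribute: 0 attempts all merges, 1 (noxmerges) stops after the one-hit
 * last_merge cache, and 2 (nomerges) disables merging entirely.  For
 * example (path assumes a device named sda):
 *
 *      echo 1 > /sys/block/sda/queue/nomerges   # keep only the one-hit cache
 */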

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.request_merged)
                e->type->ops.request_merged(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.requests_merged)
                e->type->ops.requests_merged(q, rq, next);

        elv_rqhash_reposition(q, rq);
        q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.next_request)
                return e->type->ops.next_request(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.former_request)
                return e->type->ops.former_request(q, rq);

        return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                if (uevent)
                        kobject_uevent(&e->kobj, KOBJ_ADD);

                e->registered = 1;
        }
        return error;
}
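
/*
 * A minimal sketch (hypothetical, not from this file) of the elevator_attrs
 * table walked above.  Each entry becomes a file under
 * /sys/block/<dev>/queue/iosched/, and the table is terminated by an entry
 * with a NULL name (__ATTR_NULL):
 *
 *      static ssize_t example_show(struct elevator_queue *e, char *page)
 *      {
 *              return sprintf(page, "%d\n", 42);
 *      }
 *
 *      static struct elv_fs_entry example_attrs[] = {
 *              __ATTR(example_tunable, 0444, example_show, NULL),
 *              __ATTR_NULL
 *      };
 *
 * The owning elevator_type would then point .elevator_attrs at example_attrs.
 */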

void elv_unregister_queue(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);

                e->registered = 0;
                /* Re-enable throttling in case elevator disabled it */
                wbt_enable_default(q);
        }
}

int elv_register(struct elevator_type *e)
{
        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, 0)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
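
/*
 * A minimal sketch (hypothetical module, not part of this file) of how an
 * io scheduler registers itself with elv_register()/elv_unregister().  The
 * name "example-iosched" and the empty ops are placeholders; a real
 * scheduler fills in struct elevator_mq_ops callbacks such as
 * insert_requests and dispatch_request:
 *
 *      static struct elevator_type example_iosched = {
 *              .ops = {
 *                      // .insert_requests, .dispatch_request, ...
 *              },
 *              .elevator_name  = "example-iosched",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return elv_register(&example_iosched);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              elv_unregister(&example_iosched);
 *      }
 *
 *      module_init(example_init);
 *      module_exit(example_exit);
 */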

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);

                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out;

        if (new_e) {
                ret = elv_register_queue(q, true);
                if (ret) {
                        elevator_exit(q, q->elevator);
                        goto out;
                }
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

out:
        return ret;
}

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (!queue_is_mq(q) ||
            (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
        if (q->nr_hw_queues != 1 &&
                        !blk_mq_is_sbitmap_shared(q->tag_set->flags))
                return NULL;

        return elevator_get(q, "mq-deadline", false);
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
        struct elevator_type *e, *found = NULL;

        spin_lock(&elv_list_lock);

        list_for_each_entry(e, &elv_list, list) {
                if (elv_support_features(e->elevator_features,
                                         q->required_elevator_features)) {
                        found = e;
                        break;
                }
        }

        if (found && !try_module_get(found->elevator_owner))
                found = NULL;

        spin_unlock(&elv_list_lock);
        return found;
}
/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err;

        if (!elv_support_iosched(q))
                return;

        WARN_ON_ONCE(blk_queue_registered(q));

        if (unlikely(q->elevator))
                return;

        if (!q->required_elevator_features)
                e = elevator_get_default(q);
        else
                e = elevator_get_by_features(q);
        if (!e)
                return;

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        err = blk_mq_init_sched(q, e);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
                        "falling back to \"none\"\n", e->elevator_name);
                elevator_put(e);
        }
}

/*
 * Switch to new_e io scheduler. Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. This way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        int err;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        err = elevator_switch_mq(q, new_e);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /* Make sure queue is not in the middle of being removed */
        if (!blk_queue_registered(q))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (!strncmp(name, "none", 4)) {
                if (!q->elevator)
                        return 0;
                return elevator_switch(q, NULL);
        }

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(q, strstrip(elevator_name), true);
        if (!e)
                return -EINVAL;

        if (q->elevator &&
            elevator_match(q->elevator->type, elevator_name, 0)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!elv_support_iosched(q))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        return ret;
}
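
/*
 * Example (from userspace, device name assumed): elv_iosched_store() backs
 * the queue's "scheduler" attribute, so switching and disabling the io
 * scheduler looks like:
 *
 *      echo mq-deadline > /sys/block/sda/queue/scheduler
 *      echo none > /sys/block/sda/queue/scheduler
 */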

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;
        int len = 0;

        if (!queue_is_mq(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && elevator_match(elv, __e->elevator_name, 0)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
                if (elv_support_iosched(q) &&
                    elevator_match(__e, __e->elevator_name,
                                   q->required_elevator_features))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
        return len;
}
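
/*
 * Example: reading the attribute lists every scheduler the queue can use,
 * with the active one in brackets.  The exact set depends on which
 * schedulers are built in or loaded, e.g.:
 *
 *      $ cat /sys/block/sda/queue/scheduler
 *      [mq-deadline] kyber bfq none
 */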

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
        pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
                "Please use sysfs to set IO scheduler for individual devices.\n");
        return 1;
}

__setup("elevator=", elevator_setup);