linux/block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: a mergeable request is keyed by the sector immediately
 * following it, so an incoming bio can be matched for a back merge.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
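
/*
 * Example: a request starting at sector 100 and spanning 8 sectors hashes
 * at key 108; a bio whose first sector is 108 is a back-merge candidate.
 */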

/*
 * Query the io scheduler to see if the bio the current process is issuing
 * may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_allow_merge_fn)
                return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * Can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}
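
/*
 * Note: request_module() can sleep, so elevator_get() must drop
 * elv_list_lock around it and redo the lookup afterwards; the
 * try_module_get() then pins the scheduler module for as long as the
 * reference is held.
 */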

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
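
/*
 * Usage example: booting with "elevator=deadline" on the kernel command
 * line records "deadline" in chosen_elevator; elevator_init() below then
 * prefers it over CONFIG_DEFAULT_IOSCHED for every queue.
 */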

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name, true);
                if (!e)
                        return -EINVAL;
        }

        /*
         * Use the default elevator specified by config boot param or
         * config option.  Don't try to load modules as we could be running
         * off async and request_module() isn't allowed from async.
         */
        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                "Using noop.\n");
                        e = elevator_get("noop", false);
                }
        }

        q->elevator = elevator_alloc(q, e);
        if (!q->elevator)
                return -ENOMEM;

        err = e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(elevator_init);
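
/*
 * Selection order above: an explicit name from the caller, then the
 * "elevator=" boot parameter, then CONFIG_DEFAULT_IOSCHED, and finally
 * "noop" as the last resort.
 */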

void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->type->ops.elevator_exit_fn)
                e->type->ops.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}
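
/*
 * Note that the lookup doubles as lazy cleanup: any hashed request that
 * has become unmergeable is unhashed here rather than eagerly at the
 * point where it changed state.
 */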

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
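
/*
 * Illustrative sketch (not from this file): a deadline-style scheduler
 * typically keeps one such tree per data direction and pairs the helpers
 * roughly as below.  "dd" and its sort_list field are hypothetical names.
 *
 *	elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq);
 *	__rq = elv_rb_find(&dd->sort_list[data_dir], sector);
 *	elv_rb_del(&dd->sort_list[rq_data_dir(rq)], rq);
 */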

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if ((rq->cmd_flags & REQ_DISCARD) !=
                    (pos->cmd_flags & REQ_DISCARD))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
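
/*
 * The scan above implements a one-way elevator around q->end_sector:
 * requests at or past the boundary come first in ascending order, then
 * the ones below it, again ascending.  E.g. with a boundary of 1000 and
 * pending requests at sectors 300, 700, 1200 and 1500, the resulting
 * dispatch order is 1200, 1500, 300, 700.
 */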

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
                ret = blk_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.elevator_merge_fn)
                return e->type->ops.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
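
/*
 * On success elv_merge() stores the merge target in *req and returns
 * ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE; callers get
 * ELEVATOR_NO_MERGE otherwise.  The hash can only prove back merges
 * (bio->bi_sector == rq_hash_key(rq)); front merges are left to the
 * scheduler's own lookup, typically an elv_rb_find() on the bio's end
 * sector.
 */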

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
                                     struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
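
/*
 * The retry loop above lets merges cascade: once rq has been appended to
 * __rq, the grown __rq ends at a new sector and may itself back-merge
 * into yet another hashed request, so the lookup restarts with rq = __rq.
 */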

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_merged_fn)
                e->type->ops.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        const int next_sorted = next->cmd_flags & REQ_SORTED;

        if (next_sorted && e->type->ops.elevator_merge_req_fn)
                e->type->ops.elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_bio_merged_fn)
                e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->cmd_flags & REQ_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        static int printed;

        lockdep_assert_held(q->queue_lock);

        while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        rq->q = q;

        if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
                /* fall through */
        case ELEVATOR_INSERT_SORT:
                BUG_ON(rq->cmd_type != REQ_TYPE_FS);
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_latter_req_fn)
                return e->type->ops.elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_former_req_fn)
                return e->type->ops.elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_set_req_fn)
                return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_put_req_fn)
                e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_may_queue_fn)
                return e->type->ops.elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
        struct request *rq;

        blk_abort_flushes(q);

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
                /*
                 * Mark this request as started so we don't trigger
                 * any debug logic in the end I/O path.
                 */
                blk_start_request(rq);
                __blk_end_request_all(rq, -EIO);
        }
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->cmd_flags & REQ_SORTED) &&
                    e->type->ops.elevator_completed_req_fn)
                        e->type->ops.elevator_completed_req_fn(q, rq);
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
        }
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
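
/*
 * Minimal registration sketch, loosely modeled on the stock noop
 * scheduler; the noop_* names are stand-ins for a real implementation:
 *
 *	static struct elevator_type elevator_noop = {
 *		.ops = {
 *			.elevator_merge_req_fn	= noop_merged_requests,
 *			.elevator_dispatch_fn	= noop_dispatch,
 *			.elevator_add_req_fn	= noop_add_request,
 *			.elevator_init_fn	= noop_init_queue,
 *			.elevator_exit_fn	= noop_exit_queue,
 *		},
 *		.elevator_name = "noop",
 *		.elevator_owner = THIS_MODULE,
 *	};
 *
 *	static int __init noop_init(void)
 *	{
 *		return elv_register(&elevator_noop);
 *	}
 */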

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool registered = old->registered;
        int err;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        blk_queue_bypass_start(q);

        /* unregister and clear all auxiliary data of the old elevator */
        if (registered)
                elv_unregister_queue(q);

        spin_lock_irq(q->queue_lock);
        ioc_clear_queue(q);
        spin_unlock_irq(q->queue_lock);

        /* allocate, init and register new elevator */
        err = -ENOMEM;
        q->elevator = elevator_alloc(q, new_e);
        if (!q->elevator)
                goto fail_init;

        err = new_e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                goto fail_init;
        }

        if (registered) {
                err = elv_register_queue(q);
                if (err)
                        goto fail_register;
        }

        /* done, kill the old one and finish */
        elevator_exit(old);
        blk_queue_bypass_end(q);

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        q->elevator = old;
        elv_register_queue(q);
        blk_queue_bypass_end(q);

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        if (!q->elevator)
                return -ENXIO;

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name), true);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
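
/*
 * Usage example: writing a scheduler name to the queue's sysfs node, e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * lands in elv_iosched_store() below, which calls elevator_change().
 */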

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!q->elevator)
                return count;

        ret = elevator_change(q, name);
        if (!ret)
                return count;

        printk(KERN_ERR "elevator: switch to %s failed\n", name);
        return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv;
        struct elevator_type *__e;
        int len = 0;

        if (!q->elevator || !blk_queue_stackable(q))
                return sprintf(name, "none\n");

        elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(name+len, "\n");
        return len;
}
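
/*
 * Reading the sysfs node above lists every registered scheduler and
 * brackets the active one, e.g. "noop deadline [cfq]".
 */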

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);