linux/block/blk-core.c
   1/*
   2 * Copyright (C) 1991, 1992 Linus Torvalds
   3 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   4 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   7 *      - July 2000
   8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
   9 */
  10
  11/*
  12 * This handles all read/write requests to block devices
  13 */
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/backing-dev.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/highmem.h>
  20#include <linux/mm.h>
  21#include <linux/kernel_stat.h>
  22#include <linux/string.h>
  23#include <linux/init.h>
  24#include <linux/completion.h>
  25#include <linux/slab.h>
  26#include <linux/swap.h>
  27#include <linux/writeback.h>
  28#include <linux/task_io_accounting_ops.h>
  29#include <linux/blktrace_api.h>
  30#include <linux/fault-inject.h>
  31#include <trace/block.h>
  32
  33#include "blk.h"
  34
  35DEFINE_TRACE(block_plug);
  36DEFINE_TRACE(block_unplug_io);
  37DEFINE_TRACE(block_unplug_timer);
  38DEFINE_TRACE(block_getrq);
  39DEFINE_TRACE(block_sleeprq);
  40DEFINE_TRACE(block_rq_requeue);
  41DEFINE_TRACE(block_bio_backmerge);
  42DEFINE_TRACE(block_bio_frontmerge);
  43DEFINE_TRACE(block_bio_queue);
  44DEFINE_TRACE(block_rq_complete);
  45DEFINE_TRACE(block_remap);      /* Also used in drivers/md/dm.c */
  46EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
  47
  48static int __make_request(struct request_queue *q, struct bio *bio);
  49
  50/*
  51 * For the allocated request tables
  52 */
  53static struct kmem_cache *request_cachep;
  54
  55/*
  56 * For queue allocation
  57 */
  58struct kmem_cache *blk_requestq_cachep;
  59
  60/*
  61 * Controlling structure to kblockd
  62 */
  63static struct workqueue_struct *kblockd_workqueue;
  64
  65static void drive_stat_acct(struct request *rq, int new_io)
  66{
  67        struct gendisk *disk = rq->rq_disk;
  68        struct hd_struct *part;
  69        int rw = rq_data_dir(rq);
  70        int cpu;
  71
  72        if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
  73                return;
  74
  75        cpu = part_stat_lock();
  76        part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
  77
  78        if (!new_io)
  79                part_stat_inc(cpu, part, merges[rw]);
  80        else {
  81                part_round_stats(cpu, part);
  82                part_inc_in_flight(part);
  83        }
  84
  85        part_stat_unlock();
  86}
  87
  88void blk_queue_congestion_threshold(struct request_queue *q)
  89{
  90        int nr;
  91
  92        nr = q->nr_requests - (q->nr_requests / 8) + 1;
  93        if (nr > q->nr_requests)
  94                nr = q->nr_requests;
  95        q->nr_congestion_on = nr;
  96
  97        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  98        if (nr < 1)
  99                nr = 1;
 100        q->nr_congestion_off = nr;
 101}
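
/*
 * Worked example, assuming the default q->nr_requests of BLKDEV_MAX_RQ (128):
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * The gap between the two thresholds provides hysteresis, so a queue that
 * hovers around the limit does not flap between congested and uncongested.
 */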
 102
 103/**
 104 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 105 * @bdev:       device
 106 *
 107 * Locates the passed device's request queue and returns the address of its
 108 * backing_dev_info
 109 *
 110 * Will return NULL if the request queue cannot be located.
 111 */
 112struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 113{
 114        struct backing_dev_info *ret = NULL;
 115        struct request_queue *q = bdev_get_queue(bdev);
 116
 117        if (q)
 118                ret = &q->backing_dev_info;
 119        return ret;
 120}
 121EXPORT_SYMBOL(blk_get_backing_dev_info);
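
/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * a filesystem that wants the device's readahead window might do
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(sb->s_bdev);
 *
 *	if (bdi)
 *		ra_pages = bdi->ra_pages;
 *
 * The NULL check matters: the backing device may have no request queue.
 */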
 122
 123void blk_rq_init(struct request_queue *q, struct request *rq)
 124{
 125        memset(rq, 0, sizeof(*rq));
 126
 127        INIT_LIST_HEAD(&rq->queuelist);
 128        INIT_LIST_HEAD(&rq->timeout_list);
 129        rq->cpu = -1;
 130        rq->q = q;
 131        rq->sector = rq->hard_sector = (sector_t) -1;
 132        INIT_HLIST_NODE(&rq->hash);
 133        RB_CLEAR_NODE(&rq->rb_node);
 134        rq->cmd = rq->__cmd;
 135        rq->tag = -1;
 136        rq->ref_count = 1;
 137}
 138EXPORT_SYMBOL(blk_rq_init);
 139
 140static void req_bio_endio(struct request *rq, struct bio *bio,
 141                          unsigned int nbytes, int error)
 142{
 143        struct request_queue *q = rq->q;
 144
 145        if (&q->bar_rq != rq) {
 146                if (error)
 147                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
 148                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 149                        error = -EIO;
 150
 151                if (unlikely(nbytes > bio->bi_size)) {
 152                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 153                               __func__, nbytes, bio->bi_size);
 154                        nbytes = bio->bi_size;
 155                }
 156
 157                if (unlikely(rq->cmd_flags & REQ_QUIET))
 158                        set_bit(BIO_QUIET, &bio->bi_flags);
 159
 160                bio->bi_size -= nbytes;
 161                bio->bi_sector += (nbytes >> 9);
 162
 163                if (bio_integrity(bio))
 164                        bio_integrity_advance(bio, nbytes);
 165
 166                if (bio->bi_size == 0)
 167                        bio_endio(bio, error);
 168        } else {
 169
 170                /*
 171                 * Okay, this is the barrier request in progress, just
  172                 * record the error.
 173                 */
 174                if (error && !q->orderr)
 175                        q->orderr = error;
 176        }
 177}
 178
 179void blk_dump_rq_flags(struct request *rq, char *msg)
 180{
 181        int bit;
 182
 183        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 184                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 185                rq->cmd_flags);
 186
 187        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
 188                                                (unsigned long long)rq->sector,
 189                                                rq->nr_sectors,
 190                                                rq->current_nr_sectors);
 191        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
 192                                                rq->bio, rq->biotail,
 193                                                rq->buffer, rq->data,
 194                                                rq->data_len);
 195
 196        if (blk_pc_request(rq)) {
 197                printk(KERN_INFO "  cdb: ");
 198                for (bit = 0; bit < BLK_MAX_CDB; bit++)
 199                        printk("%02x ", rq->cmd[bit]);
 200                printk("\n");
 201        }
 202}
 203EXPORT_SYMBOL(blk_dump_rq_flags);
 204
 205/*
 206 * "plug" the device if there are no outstanding requests: this will
 207 * force the transfer to start only after we have put all the requests
 208 * on the list.
 209 *
 210 * This is called with interrupts off and no requests on the queue and
 211 * with the queue lock held.
 212 */
 213void blk_plug_device(struct request_queue *q)
 214{
 215        WARN_ON(!irqs_disabled());
 216
 217        /*
 218         * don't plug a stopped queue, it must be paired with blk_start_queue()
 219         * which will restart the queueing
 220         */
 221        if (blk_queue_stopped(q))
 222                return;
 223
 224        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 225                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 226                trace_block_plug(q);
 227        }
 228}
 229EXPORT_SYMBOL(blk_plug_device);
 230
 231/**
 232 * blk_plug_device_unlocked - plug a device without queue lock held
 233 * @q:    The &struct request_queue to plug
 234 *
 235 * Description:
 236 *   Like @blk_plug_device(), but grabs the queue lock and disables
 237 *   interrupts.
 238 **/
 239void blk_plug_device_unlocked(struct request_queue *q)
 240{
 241        unsigned long flags;
 242
 243        spin_lock_irqsave(q->queue_lock, flags);
 244        blk_plug_device(q);
 245        spin_unlock_irqrestore(q->queue_lock, flags);
 246}
 247EXPORT_SYMBOL(blk_plug_device_unlocked);
 248
 249/*
 250 * remove the queue from the plugged list, if present. called with
 251 * queue lock held and interrupts disabled.
 252 */
 253int blk_remove_plug(struct request_queue *q)
 254{
 255        WARN_ON(!irqs_disabled());
 256
 257        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 258                return 0;
 259
 260        del_timer(&q->unplug_timer);
 261        return 1;
 262}
 263EXPORT_SYMBOL(blk_remove_plug);
 264
 265/*
 266 * remove the plug and let it rip..
 267 */
 268void __generic_unplug_device(struct request_queue *q)
 269{
 270        if (unlikely(blk_queue_stopped(q)))
 271                return;
 272        if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
 273                return;
 274
 275        q->request_fn(q);
 276}
 277
 278/**
 279 * generic_unplug_device - fire a request queue
 280 * @q:    The &struct request_queue in question
 281 *
 282 * Description:
  283 *   Linux uses plugging to build bigger request queues before letting
 284 *   the device have at them. If a queue is plugged, the I/O scheduler
 285 *   is still adding and merging requests on the queue. Once the queue
 286 *   gets unplugged, the request_fn defined for the queue is invoked and
 287 *   transfers started.
 288 **/
 289void generic_unplug_device(struct request_queue *q)
 290{
 291        if (blk_queue_plugged(q)) {
 292                spin_lock_irq(q->queue_lock);
 293                __generic_unplug_device(q);
 294                spin_unlock_irq(q->queue_lock);
 295        }
 296}
 297EXPORT_SYMBOL(generic_unplug_device);
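
/*
 * Sketch of kicking a plugged queue from process context (the caller and
 * bdev are assumptions for illustration), for instance while waiting on
 * I/O that is stuck behind plugged requests:
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *
 *	if (q)
 *		generic_unplug_device(q);
 *
 * generic_unplug_device() takes the queue lock itself, so it must not be
 * called with the lock already held; use __generic_unplug_device() there.
 */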
 298
 299static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 300                                   struct page *page)
 301{
 302        struct request_queue *q = bdi->unplug_io_data;
 303
 304        blk_unplug(q);
 305}
 306
 307void blk_unplug_work(struct work_struct *work)
 308{
 309        struct request_queue *q =
 310                container_of(work, struct request_queue, unplug_work);
 311
 312        trace_block_unplug_io(q);
 313        q->unplug_fn(q);
 314}
 315
 316void blk_unplug_timeout(unsigned long data)
 317{
 318        struct request_queue *q = (struct request_queue *)data;
 319
 320        trace_block_unplug_timer(q);
 321        kblockd_schedule_work(q, &q->unplug_work);
 322}
 323
 324void blk_unplug(struct request_queue *q)
 325{
 326        /*
 327         * devices don't necessarily have an ->unplug_fn defined
 328         */
 329        if (q->unplug_fn) {
 330                trace_block_unplug_io(q);
 331                q->unplug_fn(q);
 332        }
 333}
 334EXPORT_SYMBOL(blk_unplug);
 335
 336static void blk_invoke_request_fn(struct request_queue *q)
 337{
 338        if (unlikely(blk_queue_stopped(q)))
 339                return;
 340
 341        /*
 342         * one level of recursion is ok and is much faster than kicking
 343         * the unplug handling
 344         */
 345        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 346                q->request_fn(q);
 347                queue_flag_clear(QUEUE_FLAG_REENTER, q);
 348        } else {
 349                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
 350                kblockd_schedule_work(q, &q->unplug_work);
 351        }
 352}
 353
 354/**
 355 * blk_start_queue - restart a previously stopped queue
 356 * @q:    The &struct request_queue in question
 357 *
 358 * Description:
 359 *   blk_start_queue() will clear the stop flag on the queue, and call
 360 *   the request_fn for the queue if it was in a stopped state when
 361 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 362 **/
 363void blk_start_queue(struct request_queue *q)
 364{
 365        WARN_ON(!irqs_disabled());
 366
 367        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 368        blk_invoke_request_fn(q);
 369}
 370EXPORT_SYMBOL(blk_start_queue);
 371
 372/**
 373 * blk_stop_queue - stop a queue
 374 * @q:    The &struct request_queue in question
 375 *
 376 * Description:
 377 *   The Linux block layer assumes that a block driver will consume all
 378 *   entries on the request queue when the request_fn strategy is called.
 379 *   Often this will not happen, because of hardware limitations (queue
 380 *   depth settings). If a device driver gets a 'queue full' response,
 381 *   or if it simply chooses not to queue more I/O at one point, it can
 382 *   call this function to prevent the request_fn from being called until
 383 *   the driver has signalled it's ready to go again. This happens by calling
 384 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 385 **/
 386void blk_stop_queue(struct request_queue *q)
 387{
 388        blk_remove_plug(q);
 389        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 390}
 391EXPORT_SYMBOL(blk_stop_queue);
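
/*
 * Sketch of the stop/start pairing described above. my_dev,
 * my_hw_queue_full() and my_issue() are hypothetical driver pieces, not
 * part of this file. The request_fn runs with the queue lock held, so
 * blk_stop_queue() may be called directly:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (my_hw_queue_full(my_dev)) {
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			blkdev_dequeue_request(rq);
 *			my_issue(my_dev, rq);
 *		}
 *	}
 *
 * and later, from the completion interrupt once room frees up:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */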
 392
 393/**
 394 * blk_sync_queue - cancel any pending callbacks on a queue
 395 * @q: the queue
 396 *
 397 * Description:
 398 *     The block layer may perform asynchronous callback activity
 399 *     on a queue, such as calling the unplug function after a timeout.
 400 *     A block device may call blk_sync_queue to ensure that any
 401 *     such activity is cancelled, thus allowing it to release resources
 402 *     that the callbacks might use. The caller must already have made sure
 403 *     that its ->make_request_fn will not re-add plugging prior to calling
 404 *     this function.
 405 *
 406 */
 407void blk_sync_queue(struct request_queue *q)
 408{
 409        del_timer_sync(&q->unplug_timer);
 410        del_timer_sync(&q->timeout);
 411        cancel_work_sync(&q->unplug_work);
 412}
 413EXPORT_SYMBOL(blk_sync_queue);
 414
 415/**
 416 * __blk_run_queue - run a single device queue
 417 * @q:  The queue to run
 418 *
 419 * Description:
 420 *    See @blk_run_queue. This variant must be called with the queue lock
 421 *    held and interrupts disabled.
 422 *
 423 */
 424void __blk_run_queue(struct request_queue *q)
 425{
 426        blk_remove_plug(q);
 427
 428        /*
 429         * Only recurse once to avoid overrunning the stack, let the unplug
 430         * handling reinvoke the handler shortly if we already got there.
 431         */
 432        if (!elv_queue_empty(q))
 433                blk_invoke_request_fn(q);
 434}
 435EXPORT_SYMBOL(__blk_run_queue);
 436
 437/**
 438 * blk_run_queue - run a single device queue
 439 * @q: The queue to run
 440 *
 441 * Description:
 442 *    Invoke request handling on this queue, if it has pending work to do.
 443 *    May be used to restart queueing when a request has completed. Also
  444 *    see @blk_start_queueing.
 445 *
 446 */
 447void blk_run_queue(struct request_queue *q)
 448{
 449        unsigned long flags;
 450
 451        spin_lock_irqsave(q->queue_lock, flags);
 452        __blk_run_queue(q);
 453        spin_unlock_irqrestore(q->queue_lock, flags);
 454}
 455EXPORT_SYMBOL(blk_run_queue);
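
/*
 * Sketch only: when a resource shortage clears outside of ->request_fn()
 * context, e.g. in a workqueue or completion path, a driver can restart
 * dispatch with
 *
 *	blk_run_queue(my_dev->queue);
 *
 * which takes the queue lock, removes any plug and invokes the request_fn
 * if requests are pending (my_dev is an assumed driver structure).
 */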
 456
 457void blk_put_queue(struct request_queue *q)
 458{
 459        kobject_put(&q->kobj);
 460}
 461
 462void blk_cleanup_queue(struct request_queue *q)
 463{
 464        /*
 465         * We know we have process context here, so we can be a little
 466         * cautious and ensure that pending block actions on this device
 467         * are done before moving on. Going into this function, we should
 468         * not have processes doing IO to this device.
 469         */
 470        blk_sync_queue(q);
 471
 472        mutex_lock(&q->sysfs_lock);
 473        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 474        mutex_unlock(&q->sysfs_lock);
 475
 476        if (q->elevator)
 477                elevator_exit(q->elevator);
 478
 479        blk_put_queue(q);
 480}
 481EXPORT_SYMBOL(blk_cleanup_queue);
 482
 483static int blk_init_free_list(struct request_queue *q)
 484{
 485        struct request_list *rl = &q->rq;
 486
 487        rl->count[READ] = rl->count[WRITE] = 0;
 488        rl->starved[READ] = rl->starved[WRITE] = 0;
 489        rl->elvpriv = 0;
 490        init_waitqueue_head(&rl->wait[READ]);
 491        init_waitqueue_head(&rl->wait[WRITE]);
 492
 493        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 494                                mempool_free_slab, request_cachep, q->node);
 495
 496        if (!rl->rq_pool)
 497                return -ENOMEM;
 498
 499        return 0;
 500}
 501
 502struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 503{
 504        return blk_alloc_queue_node(gfp_mask, -1);
 505}
 506EXPORT_SYMBOL(blk_alloc_queue);
 507
 508struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 509{
 510        struct request_queue *q;
 511        int err;
 512
 513        q = kmem_cache_alloc_node(blk_requestq_cachep,
 514                                gfp_mask | __GFP_ZERO, node_id);
 515        if (!q)
 516                return NULL;
 517
 518        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 519        q->backing_dev_info.unplug_io_data = q;
 520        err = bdi_init(&q->backing_dev_info);
 521        if (err) {
 522                kmem_cache_free(blk_requestq_cachep, q);
 523                return NULL;
 524        }
 525
 526        init_timer(&q->unplug_timer);
 527        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 528        INIT_LIST_HEAD(&q->timeout_list);
 529        INIT_WORK(&q->unplug_work, blk_unplug_work);
 530
 531        kobject_init(&q->kobj, &blk_queue_ktype);
 532
 533        mutex_init(&q->sysfs_lock);
 534        spin_lock_init(&q->__queue_lock);
 535
 536        return q;
 537}
 538EXPORT_SYMBOL(blk_alloc_queue_node);
 539
 540/**
 541 * blk_init_queue  - prepare a request queue for use with a block device
 542 * @rfn:  The function to be called to process requests that have been
 543 *        placed on the queue.
 544 * @lock: Request queue spin lock
 545 *
 546 * Description:
 547 *    If a block device wishes to use the standard request handling procedures,
 548 *    which sorts requests and coalesces adjacent requests, then it must
 549 *    call blk_init_queue().  The function @rfn will be called when there
 550 *    are requests on the queue that need to be processed.  If the device
 551 *    supports plugging, then @rfn may not be called immediately when requests
 552 *    are available on the queue, but may be called at some time later instead.
 553 *    Plugged queues are generally unplugged when a buffer belonging to one
 554 *    of the requests on the queue is needed, or due to memory pressure.
 555 *
 556 *    @rfn is not required, or even expected, to remove all requests off the
 557 *    queue, but only as many as it can handle at a time.  If it does leave
 558 *    requests on the queue, it is responsible for arranging that the requests
 559 *    get dealt with eventually.
 560 *
 561 *    The queue spin lock must be held while manipulating the requests on the
 562 *    request queue; this lock will be taken also from interrupt context, so irq
 563 *    disabling is needed for it.
 564 *
 565 *    Function returns a pointer to the initialized request queue, or %NULL if
 566 *    it didn't succeed.
 567 *
 568 * Note:
 569 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 570 *    when the block device is deactivated (such as at module unload).
 571 **/
 572
 573struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 574{
 575        return blk_init_queue_node(rfn, lock, -1);
 576}
 577EXPORT_SYMBOL(blk_init_queue);
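
/*
 * A minimal initialisation sketch following the contract documented above;
 * my_dev and my_request_fn are stand-ins, not defined in this file:
 *
 *	spin_lock_init(&my_dev->lock);
 *	my_dev->queue = blk_init_queue(my_request_fn, &my_dev->lock);
 *	if (!my_dev->queue)
 *		return -ENOMEM;
 *	my_dev->queue->queuedata = my_dev;
 *
 * and, at teardown time, the matching call required by the note above:
 *
 *	blk_cleanup_queue(my_dev->queue);
 */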
 578
 579struct request_queue *
 580blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 581{
 582        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 583
 584        if (!q)
 585                return NULL;
 586
 587        q->node = node_id;
 588        if (blk_init_free_list(q)) {
 589                kmem_cache_free(blk_requestq_cachep, q);
 590                return NULL;
 591        }
 592
 593        /*
 594         * if caller didn't supply a lock, they get per-queue locking with
 595         * our embedded lock
 596         */
 597        if (!lock)
 598                lock = &q->__queue_lock;
 599
 600        q->request_fn           = rfn;
 601        q->prep_rq_fn           = NULL;
 602        q->unplug_fn            = generic_unplug_device;
 603        q->queue_flags          = QUEUE_FLAG_DEFAULT;
 604        q->queue_lock           = lock;
 605
 606        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 607
 608        blk_queue_make_request(q, __make_request);
 609        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 610
 611        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 612        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 613
 614        q->sg_reserved_size = INT_MAX;
 615
 616        blk_set_cmd_filter_defaults(&q->cmd_filter);
 617
 618        /*
 619         * all done
 620         */
 621        if (!elevator_init(q, NULL)) {
 622                blk_queue_congestion_threshold(q);
 623                return q;
 624        }
 625
 626        blk_put_queue(q);
 627        return NULL;
 628}
 629EXPORT_SYMBOL(blk_init_queue_node);
 630
 631int blk_get_queue(struct request_queue *q)
 632{
 633        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 634                kobject_get(&q->kobj);
 635                return 0;
 636        }
 637
 638        return 1;
 639}
 640
 641static inline void blk_free_request(struct request_queue *q, struct request *rq)
 642{
 643        if (rq->cmd_flags & REQ_ELVPRIV)
 644                elv_put_request(q, rq);
 645        mempool_free(rq, q->rq.rq_pool);
 646}
 647
 648static struct request *
 649blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 650{
 651        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 652
 653        if (!rq)
 654                return NULL;
 655
 656        blk_rq_init(q, rq);
 657
 658        rq->cmd_flags = rw | REQ_ALLOCED;
 659
 660        if (priv) {
 661                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
 662                        mempool_free(rq, q->rq.rq_pool);
 663                        return NULL;
 664                }
 665                rq->cmd_flags |= REQ_ELVPRIV;
 666        }
 667
 668        return rq;
 669}
 670
 671/*
 672 * ioc_batching returns true if the ioc is a valid batching request and
 673 * should be given priority access to a request.
 674 */
 675static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 676{
 677        if (!ioc)
 678                return 0;
 679
 680        /*
 681         * Make sure the process is able to allocate at least 1 request
 682         * even if the batch times out, otherwise we could theoretically
 683         * lose wakeups.
 684         */
 685        return ioc->nr_batch_requests == q->nr_batching ||
 686                (ioc->nr_batch_requests > 0
 687                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 688}
 689
 690/*
 691 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 692 * will cause the process to be a "batcher" on all queues in the system. This
 693 * is the behaviour we want though - once it gets a wakeup it should be given
 694 * a nice run.
 695 */
 696static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 697{
 698        if (!ioc || ioc_batching(q, ioc))
 699                return;
 700
 701        ioc->nr_batch_requests = q->nr_batching;
 702        ioc->last_waited = jiffies;
 703}
 704
 705static void __freed_request(struct request_queue *q, int rw)
 706{
 707        struct request_list *rl = &q->rq;
 708
 709        if (rl->count[rw] < queue_congestion_off_threshold(q))
 710                blk_clear_queue_congested(q, rw);
 711
 712        if (rl->count[rw] + 1 <= q->nr_requests) {
 713                if (waitqueue_active(&rl->wait[rw]))
 714                        wake_up(&rl->wait[rw]);
 715
 716                blk_clear_queue_full(q, rw);
 717        }
 718}
 719
 720/*
 721 * A request has just been released.  Account for it, update the full and
 722 * congestion status, wake up any waiters.   Called under q->queue_lock.
 723 */
 724static void freed_request(struct request_queue *q, int rw, int priv)
 725{
 726        struct request_list *rl = &q->rq;
 727
 728        rl->count[rw]--;
 729        if (priv)
 730                rl->elvpriv--;
 731
 732        __freed_request(q, rw);
 733
 734        if (unlikely(rl->starved[rw ^ 1]))
 735                __freed_request(q, rw ^ 1);
 736}
 737
 738#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 739/*
 740 * Get a free request, queue_lock must be held.
 741 * Returns NULL on failure, with queue_lock held.
 742 * Returns !NULL on success, with queue_lock *not held*.
 743 */
 744static struct request *get_request(struct request_queue *q, int rw_flags,
 745                                   struct bio *bio, gfp_t gfp_mask)
 746{
 747        struct request *rq = NULL;
 748        struct request_list *rl = &q->rq;
 749        struct io_context *ioc = NULL;
 750        const int rw = rw_flags & 0x01;
 751        int may_queue, priv;
 752
 753        may_queue = elv_may_queue(q, rw_flags);
 754        if (may_queue == ELV_MQUEUE_NO)
 755                goto rq_starved;
 756
 757        if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 758                if (rl->count[rw]+1 >= q->nr_requests) {
 759                        ioc = current_io_context(GFP_ATOMIC, q->node);
 760                        /*
 761                         * The queue will fill after this allocation, so set
 762                         * it as full, and mark this process as "batching".
 763                         * This process will be allowed to complete a batch of
 764                         * requests, others will be blocked.
 765                         */
 766                        if (!blk_queue_full(q, rw)) {
 767                                ioc_set_batching(q, ioc);
 768                                blk_set_queue_full(q, rw);
 769                        } else {
 770                                if (may_queue != ELV_MQUEUE_MUST
 771                                                && !ioc_batching(q, ioc)) {
 772                                        /*
 773                                         * The queue is full and the allocating
 774                                         * process is not a "batcher", and not
 775                                         * exempted by the IO scheduler
 776                                         */
 777                                        goto out;
 778                                }
 779                        }
 780                }
 781                blk_set_queue_congested(q, rw);
 782        }
 783
 784        /*
 785         * Only allow batching queuers to allocate up to 50% over the defined
 786         * limit of requests, otherwise we could have thousands of requests
 787         * allocated with any setting of ->nr_requests
 788         */
 789        if (rl->count[rw] >= (3 * q->nr_requests / 2))
 790                goto out;
 791
 792        rl->count[rw]++;
 793        rl->starved[rw] = 0;
 794
 795        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 796        if (priv)
 797                rl->elvpriv++;
 798
 799        spin_unlock_irq(q->queue_lock);
 800
 801        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 802        if (unlikely(!rq)) {
 803                /*
 804                 * Allocation failed presumably due to memory. Undo anything
 805                 * we might have messed up.
 806                 *
 807                 * Allocating task should really be put onto the front of the
 808                 * wait queue, but this is pretty rare.
 809                 */
 810                spin_lock_irq(q->queue_lock);
 811                freed_request(q, rw, priv);
 812
 813                /*
 814                 * in the very unlikely event that allocation failed and no
  815                 * requests for this direction were pending, mark us starved
  816                 * so that freeing of a request in the other direction will
  817                 * notice us. Another possible fix would be to split the
 818                 * rq mempool into READ and WRITE
 819                 */
 820rq_starved:
 821                if (unlikely(rl->count[rw] == 0))
 822                        rl->starved[rw] = 1;
 823
 824                goto out;
 825        }
 826
 827        /*
 828         * ioc may be NULL here, and ioc_batching will be false. That's
 829         * OK, if the queue is under the request limit then requests need
 830         * not count toward the nr_batch_requests limit. There will always
 831         * be some limit enforced by BLK_BATCH_TIME.
 832         */
 833        if (ioc_batching(q, ioc))
 834                ioc->nr_batch_requests--;
 835
 836        trace_block_getrq(q, bio, rw);
 837out:
 838        return rq;
 839}
 840
 841/*
 842 * No available requests for this queue, unplug the device and wait for some
 843 * requests to become available.
 844 *
 845 * Called with q->queue_lock held, and returns with it unlocked.
 846 */
 847static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 848                                        struct bio *bio)
 849{
 850        const int rw = rw_flags & 0x01;
 851        struct request *rq;
 852
 853        rq = get_request(q, rw_flags, bio, GFP_NOIO);
 854        while (!rq) {
 855                DEFINE_WAIT(wait);
 856                struct io_context *ioc;
 857                struct request_list *rl = &q->rq;
 858
 859                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 860                                TASK_UNINTERRUPTIBLE);
 861
 862                trace_block_sleeprq(q, bio, rw);
 863
 864                __generic_unplug_device(q);
 865                spin_unlock_irq(q->queue_lock);
 866                io_schedule();
 867
 868                /*
 869                 * After sleeping, we become a "batching" process and
 870                 * will be able to allocate at least one request, and
  871                 * up to a big batch of them for a small period of time.
 872                 * See ioc_batching, ioc_set_batching
 873                 */
 874                ioc = current_io_context(GFP_NOIO, q->node);
 875                ioc_set_batching(q, ioc);
 876
 877                spin_lock_irq(q->queue_lock);
 878                finish_wait(&rl->wait[rw], &wait);
 879
 880                rq = get_request(q, rw_flags, bio, GFP_NOIO);
  881        }
 882
 883        return rq;
 884}
 885
 886struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 887{
 888        struct request *rq;
 889
 890        BUG_ON(rw != READ && rw != WRITE);
 891
 892        spin_lock_irq(q->queue_lock);
 893        if (gfp_mask & __GFP_WAIT) {
 894                rq = get_request_wait(q, rw, NULL);
 895        } else {
 896                rq = get_request(q, rw, NULL, gfp_mask);
 897                if (!rq)
 898                        spin_unlock_irq(q->queue_lock);
 899        }
 900        /* q->queue_lock is unlocked at this point */
 901
 902        return rq;
 903}
 904EXPORT_SYMBOL(blk_get_request);
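
/*
 * Sketch of allocating and issuing a driver-private request from process
 * context. my_cmd is an assumed cookie; blk_execute_rq() lives in
 * blk-exec.c. With __GFP_WAIT set the allocation sleeps rather than fail:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	rq->special = my_cmd;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */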
 905
 906/**
 907 * blk_start_queueing - initiate dispatch of requests to device
 908 * @q:          request queue to kick into gear
 909 *
 910 * This is basically a helper to remove the need to know whether a queue
 911 * is plugged or not if someone just wants to initiate dispatch of requests
 912 * for this queue. Should be used to start queueing on a device outside
 913 * of ->request_fn() context. Also see @blk_run_queue.
 914 *
 915 * The queue lock must be held with interrupts disabled.
 916 */
 917void blk_start_queueing(struct request_queue *q)
 918{
 919        if (!blk_queue_plugged(q)) {
 920                if (unlikely(blk_queue_stopped(q)))
 921                        return;
 922                q->request_fn(q);
 923        } else
 924                __generic_unplug_device(q);
 925}
 926EXPORT_SYMBOL(blk_start_queueing);
 927
 928/**
 929 * blk_requeue_request - put a request back on queue
 930 * @q:          request queue where request should be inserted
 931 * @rq:         request to be inserted
 932 *
 933 * Description:
 934 *    Drivers often keep queueing requests until the hardware cannot accept
 935 *    more, when that condition happens we need to put the request back
 936 *    on the queue. Must be called with queue lock held.
 937 */
 938void blk_requeue_request(struct request_queue *q, struct request *rq)
 939{
 940        blk_delete_timer(rq);
 941        blk_clear_rq_complete(rq);
 942        trace_block_rq_requeue(q, rq);
 943
 944        if (blk_rq_tagged(rq))
 945                blk_queue_end_tag(q, rq);
 946
 947        elv_requeue_request(q, rq);
 948}
 949EXPORT_SYMBOL(blk_requeue_request);
 950
 951/**
 952 * blk_insert_request - insert a special request into a request queue
 953 * @q:          request queue where request should be inserted
 954 * @rq:         request to be inserted
 955 * @at_head:    insert request at head or tail of queue
 956 * @data:       private data
 957 *
 958 * Description:
 959 *    Many block devices need to execute commands asynchronously, so they don't
 960 *    block the whole kernel from preemption during request execution.  This is
  961 *    accomplished normally by inserting artificial requests tagged as
  962 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 963 *    be scheduled for actual execution by the request queue.
 964 *
 965 *    We have the option of inserting the head or the tail of the queue.
 966 *    Typically we use the tail for new ioctls and so forth.  We use the head
 967 *    of the queue for things like a QUEUE_FULL message from a device, or a
 968 *    host that is unable to accept a particular command.
 969 */
 970void blk_insert_request(struct request_queue *q, struct request *rq,
 971                        int at_head, void *data)
 972{
 973        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 974        unsigned long flags;
 975
 976        /*
 977         * tell I/O scheduler that this isn't a regular read/write (ie it
 978         * must not attempt merges on this) and that it acts as a soft
 979         * barrier
 980         */
 981        rq->cmd_type = REQ_TYPE_SPECIAL;
 982        rq->cmd_flags |= REQ_SOFTBARRIER;
 983
 984        rq->special = data;
 985
 986        spin_lock_irqsave(q->queue_lock, flags);
 987
 988        /*
 989         * If command is tagged, release the tag
 990         */
 991        if (blk_rq_tagged(rq))
 992                blk_queue_end_tag(q, rq);
 993
 994        drive_stat_acct(rq, 1);
 995        __elv_add_request(q, rq, where, 0);
 996        blk_start_queueing(q);
 997        spin_unlock_irqrestore(q->queue_lock, flags);
 998}
 999EXPORT_SYMBOL(blk_insert_request);
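
/*
 * Abbreviated sketch of the asynchronous pattern described above, with
 * my_cmd standing in for whatever the driver hangs off rq->special:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *	blk_insert_request(q, rq, 0, my_cmd);
 *
 * The request is later handed to ->request_fn() like any other; the driver
 * recognises it there by rq->cmd_type == REQ_TYPE_SPECIAL and finds its
 * data in rq->special.
 */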
1000
1001/*
1002 * add-request adds a request to the linked list.
1003 * queue lock is held and interrupts disabled, as we muck with the
1004 * request queue list.
1005 */
1006static inline void add_request(struct request_queue *q, struct request *req)
1007{
1008        drive_stat_acct(req, 1);
1009
1010        /*
1011         * elevator indicated where it wants this request to be
1012         * inserted at elevator_merge time
1013         */
1014        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
1015}
1016
1017static void part_round_stats_single(int cpu, struct hd_struct *part,
1018                                    unsigned long now)
1019{
1020        if (now == part->stamp)
1021                return;
1022
1023        if (part->in_flight) {
1024                __part_stat_add(cpu, part, time_in_queue,
1025                                part->in_flight * (now - part->stamp));
1026                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1027        }
1028        part->stamp = now;
1029}
1030
1031/**
1032 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1033 * @cpu: cpu number for stats access
1034 * @part: target partition
1035 *
1036 * The average IO queue length and utilisation statistics are maintained
1037 * by observing the current state of the queue length and the amount of
1038 * time it has been in this state for.
1039 *
1040 * Normally, that accounting is done on IO completion, but that can result
1041 * in more than a second's worth of IO being accounted for within any one
1042 * second, leading to >100% utilisation.  To deal with that, we call this
1043 * function to do a round-off before returning the results when reading
1044 * /proc/diskstats.  This accounts immediately for all queue usage up to
1045 * the current jiffies and restarts the counters again.
1046 */
1047void part_round_stats(int cpu, struct hd_struct *part)
1048{
1049        unsigned long now = jiffies;
1050
1051        if (part->partno)
1052                part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1053        part_round_stats_single(cpu, part, now);
1054}
1055EXPORT_SYMBOL_GPL(part_round_stats);
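
/*
 * The usual calling pattern, as in drive_stat_acct() above: pin the per-cpu
 * stats, round off, then unpin:
 *
 *	cpu = part_stat_lock();
 *	part_round_stats(cpu, part);
 *	part_stat_unlock();
 */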
1056
1057/*
1058 * queue lock must be held
1059 */
1060void __blk_put_request(struct request_queue *q, struct request *req)
1061{
1062        if (unlikely(!q))
1063                return;
1064        if (unlikely(--req->ref_count))
1065                return;
1066
1067        elv_completed_request(q, req);
1068
1069        /*
 1070         * Request may not have originated from ll_rw_blk. If not,
1071         * it didn't come out of our reserved rq pools
1072         */
1073        if (req->cmd_flags & REQ_ALLOCED) {
1074                int rw = rq_data_dir(req);
1075                int priv = req->cmd_flags & REQ_ELVPRIV;
1076
1077                BUG_ON(!list_empty(&req->queuelist));
1078                BUG_ON(!hlist_unhashed(&req->hash));
1079
1080                blk_free_request(q, req);
1081                freed_request(q, rw, priv);
1082        }
1083}
1084EXPORT_SYMBOL_GPL(__blk_put_request);
1085
1086void blk_put_request(struct request *req)
1087{
1088        unsigned long flags;
1089        struct request_queue *q = req->q;
1090
1091        spin_lock_irqsave(q->queue_lock, flags);
1092        __blk_put_request(q, req);
1093        spin_unlock_irqrestore(q->queue_lock, flags);
1094}
1095EXPORT_SYMBOL(blk_put_request);
1096
1097void init_request_from_bio(struct request *req, struct bio *bio)
1098{
1099        req->cpu = bio->bi_comp_cpu;
1100        req->cmd_type = REQ_TYPE_FS;
1101
1102        /*
1103         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1104         */
1105        if (bio_rw_ahead(bio))
1106                req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
1107                                   REQ_FAILFAST_DRIVER);
1108        if (bio_failfast_dev(bio))
1109                req->cmd_flags |= REQ_FAILFAST_DEV;
1110        if (bio_failfast_transport(bio))
1111                req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
1112        if (bio_failfast_driver(bio))
1113                req->cmd_flags |= REQ_FAILFAST_DRIVER;
1114
1115        /*
 1116         * REQ_BARRIER implies no merging, but let's make it explicit
1117         */
1118        if (unlikely(bio_discard(bio))) {
1119                req->cmd_flags |= REQ_DISCARD;
1120                if (bio_barrier(bio))
1121                        req->cmd_flags |= REQ_SOFTBARRIER;
1122                req->q->prepare_discard_fn(req->q, req);
1123        } else if (unlikely(bio_barrier(bio)))
1124                req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1125
1126        if (bio_sync(bio))
1127                req->cmd_flags |= REQ_RW_SYNC;
1128        if (bio_unplug(bio))
1129                req->cmd_flags |= REQ_UNPLUG;
1130        if (bio_rw_meta(bio))
1131                req->cmd_flags |= REQ_RW_META;
1132
1133        req->errors = 0;
1134        req->hard_sector = req->sector = bio->bi_sector;
1135        req->ioprio = bio_prio(bio);
1136        req->start_time = jiffies;
1137        blk_rq_bio_prep(req->q, req, bio);
1138}
1139
1140static int __make_request(struct request_queue *q, struct bio *bio)
1141{
1142        struct request *req;
1143        int el_ret, nr_sectors;
1144        const unsigned short prio = bio_prio(bio);
1145        const int sync = bio_sync(bio);
1146        const int unplug = bio_unplug(bio);
1147        int rw_flags;
1148
1149        nr_sectors = bio_sectors(bio);
1150
1151        /*
1152         * low level driver can indicate that it wants pages above a
1153         * certain limit bounced to low memory (ie for highmem, or even
1154         * ISA dma in theory)
1155         */
1156        blk_queue_bounce(q, &bio);
1157
1158        spin_lock_irq(q->queue_lock);
1159
1160        if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
1161                goto get_rq;
1162
1163        el_ret = elv_merge(q, &req, bio);
1164        switch (el_ret) {
1165        case ELEVATOR_BACK_MERGE:
1166                BUG_ON(!rq_mergeable(req));
1167
1168                if (!ll_back_merge_fn(q, req, bio))
1169                        break;
1170
1171                trace_block_bio_backmerge(q, bio);
1172
1173                req->biotail->bi_next = bio;
1174                req->biotail = bio;
1175                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1176                req->ioprio = ioprio_best(req->ioprio, prio);
1177                if (!blk_rq_cpu_valid(req))
1178                        req->cpu = bio->bi_comp_cpu;
1179                drive_stat_acct(req, 0);
1180                if (!attempt_back_merge(q, req))
1181                        elv_merged_request(q, req, el_ret);
1182                goto out;
1183
1184        case ELEVATOR_FRONT_MERGE:
1185                BUG_ON(!rq_mergeable(req));
1186
1187                if (!ll_front_merge_fn(q, req, bio))
1188                        break;
1189
1190                trace_block_bio_frontmerge(q, bio);
1191
1192                bio->bi_next = req->bio;
1193                req->bio = bio;
1194
1195                /*
 1196                 * may not be valid. If the low level driver said
1197                 * it didn't need a bounce buffer then it better
1198                 * not touch req->buffer either...
1199                 */
1200                req->buffer = bio_data(bio);
1201                req->current_nr_sectors = bio_cur_sectors(bio);
1202                req->hard_cur_sectors = req->current_nr_sectors;
1203                req->sector = req->hard_sector = bio->bi_sector;
1204                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1205                req->ioprio = ioprio_best(req->ioprio, prio);
1206                if (!blk_rq_cpu_valid(req))
1207                        req->cpu = bio->bi_comp_cpu;
1208                drive_stat_acct(req, 0);
1209                if (!attempt_front_merge(q, req))
1210                        elv_merged_request(q, req, el_ret);
1211                goto out;
1212
 1213        /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1214        default:
1215                ;
1216        }
1217
1218get_rq:
1219        /*
1220         * This sync check and mask will be re-done in init_request_from_bio(),
1221         * but we need to set it earlier to expose the sync flag to the
1222         * rq allocator and io schedulers.
1223         */
1224        rw_flags = bio_data_dir(bio);
1225        if (sync)
1226                rw_flags |= REQ_RW_SYNC;
1227
1228        /*
 1229         * Grab a free request. This might sleep but cannot fail.
1230         * Returns with the queue unlocked.
1231         */
1232        req = get_request_wait(q, rw_flags, bio);
1233
1234        /*
1235         * After dropping the lock and possibly sleeping here, our request
1236         * may now be mergeable after it had proven unmergeable (above).
1237         * We don't worry about that case for efficiency. It won't happen
1238         * often, and the elevators are able to handle it.
1239         */
1240        init_request_from_bio(req, bio);
1241
1242        spin_lock_irq(q->queue_lock);
1243        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
1244            bio_flagged(bio, BIO_CPU_AFFINE))
1245                req->cpu = blk_cpu_to_group(smp_processor_id());
1246        if (!blk_queue_nonrot(q) && elv_queue_empty(q))
1247                blk_plug_device(q);
1248        add_request(q, req);
1249out:
1250        if (unplug || blk_queue_nonrot(q))
1251                __generic_unplug_device(q);
1252        spin_unlock_irq(q->queue_lock);
1253        return 0;
1254}
1255
1256/*
 1257 * If bio->bi_bdev is a partition, remap the location
1258 */
1259static inline void blk_partition_remap(struct bio *bio)
1260{
1261        struct block_device *bdev = bio->bi_bdev;
1262
1263        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1264                struct hd_struct *p = bdev->bd_part;
1265
1266                bio->bi_sector += p->start_sect;
1267                bio->bi_bdev = bdev->bd_contains;
1268
1269                trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
1270                                    bdev->bd_dev, bio->bi_sector,
1271                                    bio->bi_sector - p->start_sect);
1272        }
1273}
1274
1275static void handle_bad_sector(struct bio *bio)
1276{
1277        char b[BDEVNAME_SIZE];
1278
1279        printk(KERN_INFO "attempt to access beyond end of device\n");
1280        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1281                        bdevname(bio->bi_bdev, b),
1282                        bio->bi_rw,
1283                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
1284                        (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1285
1286        set_bit(BIO_EOF, &bio->bi_flags);
1287}
1288
1289#ifdef CONFIG_FAIL_MAKE_REQUEST
1290
1291static DECLARE_FAULT_ATTR(fail_make_request);
1292
1293static int __init setup_fail_make_request(char *str)
1294{
1295        return setup_fault_attr(&fail_make_request, str);
1296}
1297__setup("fail_make_request=", setup_fail_make_request);
1298
1299static int should_fail_request(struct bio *bio)
1300{
1301        struct hd_struct *part = bio->bi_bdev->bd_part;
1302
1303        if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
1304                return should_fail(&fail_make_request, bio->bi_size);
1305
1306        return 0;
1307}
1308
1309static int __init fail_make_request_debugfs(void)
1310{
1311        return init_fault_attr_dentries(&fail_make_request,
1312                                        "fail_make_request");
1313}
1314
1315late_initcall(fail_make_request_debugfs);
1316
1317#else /* CONFIG_FAIL_MAKE_REQUEST */
1318
1319static inline int should_fail_request(struct bio *bio)
1320{
1321        return 0;
1322}
1323
1324#endif /* CONFIG_FAIL_MAKE_REQUEST */
1325
1326/*
1327 * Check whether this bio extends beyond the end of the device.
1328 */
1329static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1330{
1331        sector_t maxsector;
1332
1333        if (!nr_sectors)
1334                return 0;
1335
1336        /* Test device or partition size, when known. */
1337        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1338        if (maxsector) {
1339                sector_t sector = bio->bi_sector;
1340
1341                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1342                        /*
1343                         * This may well happen - the kernel calls bread()
1344                         * without checking the size of the device, e.g., when
1345                         * mounting a device.
1346                         */
1347                        handle_bad_sector(bio);
1348                        return 1;
1349                }
1350        }
1351
1352        return 0;
1353}
1354
1355/**
1356 * generic_make_request - hand a buffer to its device driver for I/O
1357 * @bio:  The bio describing the location in memory and on the device.
1358 *
1359 * generic_make_request() is used to make I/O requests of block
1360 * devices. It is passed a &struct bio, which describes the I/O that needs
1361 * to be done.
1362 *
1363 * generic_make_request() does not return any status.  The
1364 * success/failure status of the request, along with notification of
1365 * completion, is delivered asynchronously through the bio->bi_end_io
 1366 * function described (one day) elsewhere.
1367 *
1368 * The caller of generic_make_request must make sure that bi_io_vec
 1369 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1370 * set to describe the device address, and the
1371 * bi_end_io and optionally bi_private are set to describe how
1372 * completion notification should be signaled.
1373 *
1374 * generic_make_request and the drivers it calls may use bi_next if this
 1375 * bio happens to be merged with someone else, and may change bi_bdev and
1376 * bi_sector for remaps as it sees fit.  So the values of these fields
1377 * should NOT be depended on after the call to generic_make_request.
1378 */
1379static inline void __generic_make_request(struct bio *bio)
1380{
1381        struct request_queue *q;
1382        sector_t old_sector;
1383        int ret, nr_sectors = bio_sectors(bio);
1384        dev_t old_dev;
1385        int err = -EIO;
1386
1387        might_sleep();
1388
1389        if (bio_check_eod(bio, nr_sectors))
1390                goto end_io;
1391
1392        /*
1393         * Resolve the mapping until finished. (drivers are
1394         * still free to implement/resolve their own stacking
1395         * by explicitly returning 0)
1396         *
1397         * NOTE: we don't repeat the blk_size check for each new device.
1398         * Stacking drivers are expected to know what they are doing.
1399         */
1400        old_sector = -1;
1401        old_dev = 0;
1402        do {
1403                char b[BDEVNAME_SIZE];
1404
1405                q = bdev_get_queue(bio->bi_bdev);
1406                if (unlikely(!q)) {
1407                        printk(KERN_ERR
1408                               "generic_make_request: Trying to access "
1409                                "nonexistent block-device %s (%Lu)\n",
1410                                bdevname(bio->bi_bdev, b),
1411                                (long long) bio->bi_sector);
1412                        goto end_io;
1413                }
1414
1415                if (unlikely(nr_sectors > q->max_hw_sectors)) {
1416                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1417                                bdevname(bio->bi_bdev, b),
1418                                bio_sectors(bio),
1419                                q->max_hw_sectors);
1420                        goto end_io;
1421                }
1422
1423                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1424                        goto end_io;
1425
1426                if (should_fail_request(bio))
1427                        goto end_io;
1428
1429                /*
1430                 * If this device has partitions, remap block n
1431                 * of partition p to block n+start(p) of the disk.
1432                 */
1433                blk_partition_remap(bio);
1434
1435                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1436                        goto end_io;
1437
1438                if (old_sector != -1)
1439                        trace_block_remap(q, bio, old_dev, bio->bi_sector,
1440                                            old_sector);
1441
1442                trace_block_bio_queue(q, bio);
1443
1444                old_sector = bio->bi_sector;
1445                old_dev = bio->bi_bdev->bd_dev;
1446
1447                if (bio_check_eod(bio, nr_sectors))
1448                        goto end_io;
1449
1450                if (bio_discard(bio) && !q->prepare_discard_fn) {
1451                        err = -EOPNOTSUPP;
1452                        goto end_io;
1453                }
1454                if (bio_barrier(bio) && bio_has_data(bio) &&
1455                    (q->next_ordered == QUEUE_ORDERED_NONE)) {
1456                        err = -EOPNOTSUPP;
1457                        goto end_io;
1458                }
1459
1460                ret = q->make_request_fn(q, bio);
1461        } while (ret);
1462
1463        return;
1464
1465end_io:
1466        bio_endio(bio, err);
1467}
1468
1469/*
1470 * We only want one ->make_request_fn to be active at a time,
1471 * else stack usage with stacked devices could be a problem.
1472 * So use current->bio_{list,tail} to keep a list of requests
 1473 * submitted by a make_request_fn function.
1474 * current->bio_tail is also used as a flag to say if
1475 * generic_make_request is currently active in this task or not.
1476 * If it is NULL, then no make_request is active.  If it is non-NULL,
1477 * then a make_request is active, and new requests should be added
1478 * at the tail
1479 */
1480void generic_make_request(struct bio *bio)
1481{
1482        if (current->bio_tail) {
1483                /* make_request is active */
1484                *(current->bio_tail) = bio;
1485                bio->bi_next = NULL;
1486                current->bio_tail = &bio->bi_next;
1487                return;
1488        }
1489        /* following loop may be a bit non-obvious, and so deserves some
1490         * explanation.
1491         * Before entering the loop, bio->bi_next is NULL (as all callers
1492         * ensure that) so we have a list with a single bio.
1493         * We pretend that we have just taken it off a longer list, so
1494         * we assign bio_list to the next (which is NULL) and bio_tail
1495         * to &bio_list, thus initialising the bio_list of new bios to be
1496         * added.  __generic_make_request may indeed add some more bios
1497         * through a recursive call to generic_make_request.  If it
1498         * did, we find a non-NULL value in bio_list and re-enter the loop
1499         * from the top.  In this case we really did just take the bio
 1500         * off the top of the list (no pretending) and so fix up bio_list and
1501         * bio_tail or bi_next, and call into __generic_make_request again.
1502         *
1503         * The loop was structured like this to make only one call to
1504         * __generic_make_request (which is important as it is large and
1505         * inlined) and to keep the structure simple.
1506         */
1507        BUG_ON(bio->bi_next);
1508        do {
1509                current->bio_list = bio->bi_next;
1510                if (bio->bi_next == NULL)
1511                        current->bio_tail = &current->bio_list;
1512                else
1513                        bio->bi_next = NULL;
1514                __generic_make_request(bio);
1515                bio = current->bio_list;
1516        } while (bio);
1517        current->bio_tail = NULL; /* deactivate */
1518}
1519EXPORT_SYMBOL(generic_make_request);
1520
1521/**
1522 * submit_bio - submit a bio to the block device layer for I/O
1523 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1524 * @bio: The &struct bio which describes the I/O
1525 *
1526 * submit_bio() is very similar in purpose to generic_make_request(), and
1527 * uses that function to do most of the work. Both are fairly rough
 1528 * interfaces; @bio must be set up beforehand and ready for I/O.
1529 *
1530 */
1531void submit_bio(int rw, struct bio *bio)
1532{
1533        int count = bio_sectors(bio);
1534
1535        bio->bi_rw |= rw;
1536
1537        /*
1538         * If it's a regular read/write or a barrier with data attached,
1539         * go through the normal accounting stuff before submission.
1540         */
1541        if (bio_has_data(bio)) {
1542                if (rw & WRITE) {
1543                        count_vm_events(PGPGOUT, count);
1544                } else {
1545                        task_io_account_read(bio->bi_size);
1546                        count_vm_events(PGPGIN, count);
1547                }
1548
1549                if (unlikely(block_dump)) {
1550                        char b[BDEVNAME_SIZE];
1551                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1552                                current->comm, task_pid_nr(current),
1553                                (rw & WRITE) ? "WRITE" : "READ",
1554                                (unsigned long long)bio->bi_sector,
1555                                bdevname(bio->bi_bdev, b));
1556                }
1557        }
1558
1559        generic_make_request(bio);
1560}
1561EXPORT_SYMBOL(submit_bio);
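
/*
 * Example (illustrative sketch, not part of blk-core.c): building a
 * single-page synchronous read and submitting it with submit_bio().
 * "example_end_io" and "example_read_page" are hypothetical names; real
 * callers typically use higher-level helpers such as submit_bh().
 */
static void example_end_io(struct bio *bio, int error)
{
        /* I/O is finished (error is 0 on success); wake the submitter */
        complete(bio->bi_private);
        bio_put(bio);
}

static int example_read_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = example_end_io;
        bio->bi_private = &done;

        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                bio_put(bio);
                return -EIO;
        }

        submit_bio(READ, bio);
        wait_for_completion(&done);
        return 0;
}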
1562
1563/**
1564 * blk_rq_check_limits - Helper function to check a request for the queue limit
1565 * @q:  the queue
1566 * @rq: the request being checked
1567 *
1568 * Description:
1569 *    @rq may have been built against the weaker limits of upper-level queues
1570 *    in request stacking drivers, and it may violate the limits of @q.
1571 *    Since the block layer and the underlying device driver trust @rq
1572 *    after it is inserted into @q, it should be checked against @q's
1573 *    limits before the insertion, using this generic function.
1574 *
1575 *    This function is also useful for request stacking drivers in
1576 *    cases like the one below, so it is exported.
1577 *    Request stacking drivers like request-based dm may change the queue
1578 *    limits while requests are in the queue (e.g. dm's table swapping).
1579 *    Such request stacking drivers should check those requests against
1580 *    the new queue limits again when they dispatch those requests,
1581 *    although such checks are also done against the old queue limits
1582 *    when submitting requests.
1583 */
1584int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1585{
1586        if (rq->nr_sectors > q->max_sectors ||
1587            rq->data_len > q->max_hw_sectors << 9) {
1588                printk(KERN_ERR "%s: over max size limit.\n", __func__);
1589                return -EIO;
1590        }
1591
1592        /*
1593         * The queue's settings related to segment counting, like q->bounce_pfn,
1594         * may differ from those of other stacking queues.
1595         * Recalculate the segment count so the request is checked correctly
1596         * against this queue's limits.
1597         */
1598        blk_recalc_rq_segments(rq);
1599        if (rq->nr_phys_segments > q->max_phys_segments ||
1600            rq->nr_phys_segments > q->max_hw_segments) {
1601                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1602                return -EIO;
1603        }
1604
1605        return 0;
1606}
1607EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1608
1609/**
1610 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1611 * @q:  the queue to submit the request
1612 * @rq: the request being queued
1613 */
1614int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1615{
1616        unsigned long flags;
1617
1618        if (blk_rq_check_limits(q, rq))
1619                return -EIO;
1620
1621#ifdef CONFIG_FAIL_MAKE_REQUEST
1622        if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
1623            should_fail(&fail_make_request, blk_rq_bytes(rq)))
1624                return -EIO;
1625#endif
1626
1627        spin_lock_irqsave(q->queue_lock, flags);
1628
1629        /*
1630         * The request must have been dequeued from its original queue before
1631         * this call, because it is about to be linked to another request_queue.
1632         */
1633        BUG_ON(blk_queued_rq(rq));
1634
1635        drive_stat_acct(rq, 1);
1636        __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1637
1638        spin_unlock_irqrestore(q->queue_lock, flags);
1639
1640        return 0;
1641}
1642EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
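
/*
 * Example (illustrative sketch, not part of blk-core.c): how a request
 * stacking driver might hand a cloned request to an underlying queue.
 * "example_dispatch_clone" is hypothetical; the clone is assumed to have
 * been prepared by the driver and not yet queued anywhere.
 */
static int example_dispatch_clone(struct request_queue *lower_q,
                                  struct request *clone)
{
        int ret;

        /*
         * blk_insert_cloned_request() re-checks the clone against
         * lower_q's limits and queues it at the back of the elevator.
         */
        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret)
                /* over lower_q's limits; the driver must fail or retry
                 * the original request according to its own policy */
                return ret;

        return 0;
}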
1643
1644/**
1645 * blkdev_dequeue_request - dequeue request and start timeout timer
1646 * @req: request to dequeue
1647 *
1648 * Dequeue @req and start timeout timer on it.  This hands off the
1649 * request to the driver.
1650 *
1651 * Block-internal functions which don't want to start the timer should
1652 * call elv_dequeue_request().
1653 */
1654void blkdev_dequeue_request(struct request *req)
1655{
1656        elv_dequeue_request(req->q, req);
1657
1658        /*
1659         * We are now handing the request to the hardware, add the
1660         * timeout handler.
1661         */
1662        blk_add_timer(req);
1663}
1664EXPORT_SYMBOL(blkdev_dequeue_request);
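
/*
 * Example (illustrative sketch, not part of blk-core.c): a typical
 * request_fn.  It peeks at the queue, dequeues each request with
 * blkdev_dequeue_request() (which starts the timeout timer) and hands
 * it to the hardware.  "example_hw_queue_rq" is a hypothetical driver
 * hook; the queue lock is held here, as for every request_fn.
 */
static void example_hw_queue_rq(struct request *rq)
{
        /* driver-specific: program the controller to service rq */
}

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (!blk_fs_request(rq)) {
                        /* command type this driver doesn't handle */
                        blkdev_dequeue_request(rq);
                        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
                        continue;
                }

                /* hand off to the controller; timeout starts ticking */
                blkdev_dequeue_request(rq);
                example_hw_queue_rq(rq);
        }
}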
1665
1666static void blk_account_io_completion(struct request *req, unsigned int bytes)
1667{
1668        struct gendisk *disk = req->rq_disk;
1669
1670        if (!disk || !blk_do_io_stat(disk->queue))
1671                return;
1672
1673        if (blk_fs_request(req)) {
1674                const int rw = rq_data_dir(req);
1675                struct hd_struct *part;
1676                int cpu;
1677
1678                cpu = part_stat_lock();
1679                part = disk_map_sector_rcu(req->rq_disk, req->sector);
1680                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1681                part_stat_unlock();
1682        }
1683}
1684
1685static void blk_account_io_done(struct request *req)
1686{
1687        struct gendisk *disk = req->rq_disk;
1688
1689        if (!disk || !blk_do_io_stat(disk->queue))
1690                return;
1691
1692        /*
1693         * Account IO completion.  bar_rq isn't accounted as normal IO at
1694         * either queueing or completion time.  Accounting the containing
1695         * request is enough.
1696         */
1697        if (blk_fs_request(req) && req != &req->q->bar_rq) {
1698                unsigned long duration = jiffies - req->start_time;
1699                const int rw = rq_data_dir(req);
1700                struct hd_struct *part;
1701                int cpu;
1702
1703                cpu = part_stat_lock();
1704                part = disk_map_sector_rcu(disk, req->sector);
1705
1706                part_stat_inc(cpu, part, ios[rw]);
1707                part_stat_add(cpu, part, ticks[rw], duration);
1708                part_round_stats(cpu, part);
1709                part_dec_in_flight(part);
1710
1711                part_stat_unlock();
1712        }
1713}
1714
1715/**
1716 * __end_that_request_first - end I/O on a request
1717 * @req:      the request being processed
1718 * @error:    %0 for success, < %0 for error
1719 * @nr_bytes: number of bytes to complete
1720 *
1721 * Description:
1722 *     Ends I/O on a number of bytes attached to @req, and sets it up
1723 *     for the next range of segments (if any) in the cluster.
1724 *
1725 * Return:
1726 *     %0 - we are done with this request, call end_that_request_last()
1727 *     %1 - still buffers pending for this request
1728 **/
1729static int __end_that_request_first(struct request *req, int error,
1730                                    int nr_bytes)
1731{
1732        int total_bytes, bio_nbytes, next_idx = 0;
1733        struct bio *bio;
1734
1735        trace_block_rq_complete(req->q, req);
1736
1737        /*
1738         * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
1739         * sense key with us all the way through
1740         */
1741        if (!blk_pc_request(req))
1742                req->errors = 0;
1743
1744        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1745                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1746                                req->rq_disk ? req->rq_disk->disk_name : "?",
1747                                (unsigned long long)req->sector);
1748        }
1749
1750        blk_account_io_completion(req, nr_bytes);
1751
1752        total_bytes = bio_nbytes = 0;
1753        while ((bio = req->bio) != NULL) {
1754                int nbytes;
1755
1756                if (nr_bytes >= bio->bi_size) {
1757                        req->bio = bio->bi_next;
1758                        nbytes = bio->bi_size;
1759                        req_bio_endio(req, bio, nbytes, error);
1760                        next_idx = 0;
1761                        bio_nbytes = 0;
1762                } else {
1763                        int idx = bio->bi_idx + next_idx;
1764
1765                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1766                                blk_dump_rq_flags(req, "__end_that");
1767                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1768                                       __func__, bio->bi_idx, bio->bi_vcnt);
1769                                break;
1770                        }
1771
1772                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
1773                        BIO_BUG_ON(nbytes > bio->bi_size);
1774
1775                        /*
1776                         * not a complete bvec done
1777                         */
1778                        if (unlikely(nbytes > nr_bytes)) {
1779                                bio_nbytes += nr_bytes;
1780                                total_bytes += nr_bytes;
1781                                break;
1782                        }
1783
1784                        /*
1785                         * advance to the next vector
1786                         */
1787                        next_idx++;
1788                        bio_nbytes += nbytes;
1789                }
1790
1791                total_bytes += nbytes;
1792                nr_bytes -= nbytes;
1793
1794                bio = req->bio;
1795                if (bio) {
1796                        /*
1797                         * end more in this run, or just return 'not-done'
1798                         */
1799                        if (unlikely(nr_bytes <= 0))
1800                                break;
1801                }
1802        }
1803
1804        /*
1805         * completely done
1806         */
1807        if (!req->bio)
1808                return 0;
1809
1810        /*
1811         * if the request wasn't completed, update state
1812         */
1813        if (bio_nbytes) {
1814                req_bio_endio(req, bio, bio_nbytes, error);
1815                bio->bi_idx += next_idx;
1816                bio_iovec(bio)->bv_offset += nr_bytes;
1817                bio_iovec(bio)->bv_len -= nr_bytes;
1818        }
1819
1820        blk_recalc_rq_sectors(req, total_bytes >> 9);
1821        blk_recalc_rq_segments(req);
1822        return 1;
1823}
1824
1825/*
1826 * queue lock must be held
1827 */
1828static void end_that_request_last(struct request *req, int error)
1829{
1830        if (blk_rq_tagged(req))
1831                blk_queue_end_tag(req->q, req);
1832
1833        if (blk_queued_rq(req))
1834                elv_dequeue_request(req->q, req);
1835
1836        if (unlikely(laptop_mode) && blk_fs_request(req))
1837                laptop_io_completion();
1838
1839        blk_delete_timer(req);
1840
1841        blk_account_io_done(req);
1842
1843        if (req->end_io)
1844                req->end_io(req, error);
1845        else {
1846                if (blk_bidi_rq(req))
1847                        __blk_put_request(req->next_rq->q, req->next_rq);
1848
1849                __blk_put_request(req->q, req);
1850        }
1851}
1852
1853/**
1854 * blk_rq_bytes - Returns bytes left to complete in the entire request
1855 * @rq: the request being processed
1856 **/
1857unsigned int blk_rq_bytes(struct request *rq)
1858{
1859        if (blk_fs_request(rq))
1860                return rq->hard_nr_sectors << 9;
1861
1862        return rq->data_len;
1863}
1864EXPORT_SYMBOL_GPL(blk_rq_bytes);
1865
1866/**
1867 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1868 * @rq: the request being processed
1869 **/
1870unsigned int blk_rq_cur_bytes(struct request *rq)
1871{
1872        if (blk_fs_request(rq))
1873                return rq->current_nr_sectors << 9;
1874
1875        if (rq->bio)
1876                return rq->bio->bi_size;
1877
1878        return rq->data_len;
1879}
1880EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1881
1882/**
1883 * end_request - end I/O on the current segment of the request
1884 * @req:        the request being processed
1885 * @uptodate:   error value or %0/%1 uptodate flag
1886 *
1887 * Description:
1888 *     Ends I/O on the current segment of a request. If that is the only
1889 *     remaining segment, the request is also completed and freed.
1890 *
1891 *     This is a remnant of how older block drivers handled I/O completions.
1892 *     Modern drivers typically end I/O on the full request in one go, unless
1893 *     they have a residual value to account for. For that case this function
1894 *     isn't really useful, unless the residual just happens to be the
1895 *     full current segment. In other words, don't use this function in new
1896 *     code. Use blk_end_request() or __blk_end_request() to end a request.
1897 **/
1898void end_request(struct request *req, int uptodate)
1899{
1900        int error = 0;
1901
1902        if (uptodate <= 0)
1903                error = uptodate ? uptodate : -EIO;
1904
1905        __blk_end_request(req, error, req->hard_cur_sectors << 9);
1906}
1907EXPORT_SYMBOL(end_request);
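
/*
 * Example (illustrative sketch, not part of blk-core.c): converting a
 * legacy end_request() call to the preferred helper.  Both variants
 * complete only the current segment and must be called with the queue
 * lock held.  "example_complete_segment" is a hypothetical name and the
 * boolean use of @uptodate is assumed (negative values carry an errno,
 * as end_request() above shows).
 */
static void example_complete_segment(struct request *rq, int uptodate)
{
        int error = uptodate ? 0 : -EIO;

        /* old style:  end_request(rq, uptodate); */
        __blk_end_request(rq, error, rq->hard_cur_sectors << 9);
}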
1908
1909static int end_that_request_data(struct request *rq, int error,
1910                                 unsigned int nr_bytes, unsigned int bidi_bytes)
1911{
1912        if (rq->bio) {
1913                if (__end_that_request_first(rq, error, nr_bytes))
1914                        return 1;
1915
1916                /* Bidi request must be completed as a whole */
1917                if (blk_bidi_rq(rq) &&
1918                    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1919                        return 1;
1920        }
1921
1922        return 0;
1923}
1924
1925/**
1926 * blk_end_io - Generic end_io function to complete a request.
1927 * @rq:           the request being processed
1928 * @error:        %0 for success, < %0 for error
1929 * @nr_bytes:     number of bytes to complete @rq
1930 * @bidi_bytes:   number of bytes to complete @rq->next_rq
1931 * @drv_callback: function called between completion of bios in the request
1932 *                and completion of the request.
1933 *                If the callback returns non %0, this helper returns without
1934 *                completion of the request.
1935 *
1936 * Description:
1937 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1938 *     If @rq has leftover, sets it up for the next range of segments.
1939 *
1940 * Return:
1941 *     %0 - we are done with this request
1942 *     %1 - this request is not freed yet, it still has pending buffers.
1943 **/
1944static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1945                      unsigned int bidi_bytes,
1946                      int (drv_callback)(struct request *))
1947{
1948        struct request_queue *q = rq->q;
1949        unsigned long flags = 0UL;
1950
1951        if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
1952                return 1;
1953
1954        /* Special feature for tricky drivers */
1955        if (drv_callback && drv_callback(rq))
1956                return 1;
1957
1958        add_disk_randomness(rq->rq_disk);
1959
1960        spin_lock_irqsave(q->queue_lock, flags);
1961        end_that_request_last(rq, error);
1962        spin_unlock_irqrestore(q->queue_lock, flags);
1963
1964        return 0;
1965}
1966
1967/**
1968 * blk_end_request - Helper function for drivers to complete the request.
1969 * @rq:       the request being processed
1970 * @error:    %0 for success, < %0 for error
1971 * @nr_bytes: number of bytes to complete
1972 *
1973 * Description:
1974 *     Ends I/O on a number of bytes attached to @rq.
1975 *     If @rq has leftover, sets it up for the next range of segments.
1976 *
1977 * Return:
1978 *     %0 - we are done with this request
1979 *     %1 - still buffers pending for this request
1980 **/
1981int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1982{
1983        return blk_end_io(rq, error, nr_bytes, 0, NULL);
1984}
1985EXPORT_SYMBOL_GPL(blk_end_request);
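
/*
 * Example (illustrative sketch, not part of blk-core.c): completing a
 * request from a driver's interrupt path.  blk_end_request() takes the
 * queue lock itself, so it may be called without it.  The names
 * "example_port", "example_issue_remainder" and the per-port fields
 * are hypothetical.
 */
struct example_port {
        struct request *current_rq;     /* request being serviced */
        unsigned int bytes_done;        /* bytes the hardware finished */
        int hw_error;                   /* non-zero on hardware error */
};

static void example_issue_remainder(struct request *rq)
{
        /* driver-specific: re-program the controller for what is left */
}

static void example_complete_from_irq(struct example_port *port)
{
        struct request *rq = port->current_rq;
        int error = port->hw_error ? -EIO : 0;

        if (blk_end_request(rq, error, port->bytes_done)) {
                /* partial completion: rq still has pending buffers */
                example_issue_remainder(rq);
                return;
        }

        /* fully completed and freed by the block layer */
        port->current_rq = NULL;
}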
1986
1987/**
1988 * __blk_end_request - Helper function for drivers to complete the request.
1989 * @rq:       the request being processed
1990 * @error:    %0 for success, < %0 for error
1991 * @nr_bytes: number of bytes to complete
1992 *
1993 * Description:
1994 *     Must be called with queue lock held unlike blk_end_request().
1995 *
1996 * Return:
1997 *     %0 - we are done with this request
1998 *     %1 - still buffers pending for this request
1999 **/
2000int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2001{
2002        if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
2003                return 1;
2004
2005        add_disk_randomness(rq->rq_disk);
2006
2007        end_that_request_last(rq, error);
2008
2009        return 0;
2010}
2011EXPORT_SYMBOL_GPL(__blk_end_request);
2012
2013/**
2014 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
2015 * @rq:         the bidi request being processed
2016 * @error:      %0 for success, < %0 for error
2017 * @nr_bytes:   number of bytes to complete @rq
2018 * @bidi_bytes: number of bytes to complete @rq->next_rq
2019 *
2020 * Description:
2021 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2022 *
2023 * Return:
2024 *     %0 - we are done with this request
2025 *     %1 - still buffers pending for this request
2026 **/
2027int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
2028                         unsigned int bidi_bytes)
2029{
2030        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
2031}
2032EXPORT_SYMBOL_GPL(blk_end_bidi_request);
2033
2034/**
2035 * blk_update_request - Special helper function for request stacking drivers
2036 * @rq:           the request being processed
2037 * @error:        %0 for success, < %0 for error
2038 * @nr_bytes:     number of bytes to complete @rq
2039 *
2040 * Description:
2041 *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
2042 *     the request structure even if @rq doesn't have leftover.
2043 *     If @rq has leftover, sets it up for the next range of segments.
2044 *
2045 *     This special helper function is only for request stacking drivers
2046 *     (e.g. request-based dm) so that they can handle partial completion.
2047 *     Actual device drivers should use blk_end_request instead.
2048 */
2049void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
2050{
2051        if (!end_that_request_data(rq, error, nr_bytes, 0)) {
2052                /*
2053                 * These members are not updated in end_that_request_data()
2054                 * when all bios are completed.
2055                 * Update them so that the request stacking driver can find
2056                 * how many bytes remain in the request later.
2057                 */
2058                rq->nr_sectors = rq->hard_nr_sectors = 0;
2059                rq->current_nr_sectors = rq->hard_cur_sectors = 0;
2060        }
2061}
2062EXPORT_SYMBOL_GPL(blk_update_request);
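
/*
 * Example (illustrative sketch, not part of blk-core.c): a request
 * stacking driver propagating a partial completion of a clone to the
 * original request.  "example_update_original" is hypothetical; the
 * original is completed and freed elsewhere (e.g. with blk_end_request())
 * only once the whole clone has finished.
 */
static void example_update_original(struct request *orig, int error,
                                    unsigned int nr_bytes)
{
        /*
         * Advance orig by the number of bytes the clone completed, but
         * never free orig here even if nothing is left of it.
         */
        blk_update_request(orig, error, nr_bytes);
}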
2063
2064/**
2065 * blk_end_request_callback - Special helper function for tricky drivers
2066 * @rq:           the request being processed
2067 * @error:        %0 for success, < %0 for error
2068 * @nr_bytes:     number of bytes to complete
2069 * @drv_callback: function called between completion of bios in the request
2070 *                and completion of the request.
2071 *                If the callback returns non %0, this helper returns without
2072 *                completion of the request.
2073 *
2074 * Description:
2075 *     Ends I/O on a number of bytes attached to @rq.
2076 *     If @rq has leftover, sets it up for the next range of segments.
2077 *
2078 *     This special helper function is used only for existing tricky drivers.
2079 *     (e.g. cdrom_newpc_intr() of ide-cd)
2080 *     This interface will be removed when such drivers are rewritten.
2081 *     Don't use this interface in other places anymore.
2082 *
2083 * Return:
2084 *     %0 - we are done with this request
2085 *     %1 - this request is not freed yet.
2086 *          this request still has pending buffers or
2087 *          the driver doesn't want to finish this request yet.
2088 **/
2089int blk_end_request_callback(struct request *rq, int error,
2090                             unsigned int nr_bytes,
2091                             int (drv_callback)(struct request *))
2092{
2093        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
2094}
2095EXPORT_SYMBOL_GPL(blk_end_request_callback);
2096
2097void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2098                     struct bio *bio)
2099{
2100        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
2101         * we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
2102        rq->cmd_flags |= (bio->bi_rw & 3);
2103
2104        if (bio_has_data(bio)) {
2105                rq->nr_phys_segments = bio_phys_segments(q, bio);
2106                rq->buffer = bio_data(bio);
2107        }
2108        rq->current_nr_sectors = bio_cur_sectors(bio);
2109        rq->hard_cur_sectors = rq->current_nr_sectors;
2110        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2111        rq->data_len = bio->bi_size;
2112
2113        rq->bio = rq->biotail = bio;
2114
2115        if (bio->bi_bdev)
2116                rq->rq_disk = bio->bi_bdev->bd_disk;
2117}
2118
2119/**
2120 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2121 * @q : the queue of the device being checked
2122 *
2123 * Description:
2124 *    Check if underlying low-level drivers of a device are busy.
2125 *    If the drivers want to export their busy state, they must set their
2126 *    own exporting function using blk_queue_lld_busy() first.
2127 *
2128 *    Basically, this function is used only by request stacking drivers
2129 *    to stop dispatching requests to underlying devices when underlying
2130 *    devices are busy.  This behavior helps more I/O merging on the queue
2131 *    of the request stacking driver and prevents I/O throughput regression
2132 *    on burst I/O load.
2133 *
2134 * Return:
2135 *    0 - Not busy (The request stacking driver should dispatch request)
2136 *    1 - Busy (The request stacking driver should stop dispatching request)
2137 */
2138int blk_lld_busy(struct request_queue *q)
2139{
2140        if (q->lld_busy_fn)
2141                return q->lld_busy_fn(q);
2142
2143        return 0;
2144}
2145EXPORT_SYMBOL_GPL(blk_lld_busy);
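
/*
 * Example (illustrative sketch, not part of blk-core.c): a low-level
 * driver exporting its busy state with blk_queue_lld_busy(), and a
 * request stacking driver consulting blk_lld_busy() before dispatching.
 * "example_hba" and its fields are hypothetical.
 */
struct example_hba {
        unsigned int outstanding;       /* commands currently in flight */
        unsigned int can_queue;         /* hardware queue depth */
};

static int example_lld_busy(struct request_queue *q)
{
        struct example_hba *hba = q->queuedata;

        return hba->outstanding >= hba->can_queue;
}

/* In the low-level driver, at queue setup time:
 *
 *      blk_queue_lld_busy(q, example_lld_busy);
 *
 * In the stacking driver's dispatch path:
 */
static int example_should_dispatch(struct request_queue *lower_q)
{
        return !blk_lld_busy(lower_q);
}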
2146
2147int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2148{
2149        return queue_work(kblockd_workqueue, work);
2150}
2151EXPORT_SYMBOL(kblockd_schedule_work);
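
/*
 * Example (illustrative sketch, not part of blk-core.c): deferring work
 * that cannot be done in atomic context (here, re-running a queue) to
 * the kblockd workqueue.  "example_deferred", "example_run_queue_work"
 * and "example_defer_run" are hypothetical names.
 */
struct example_deferred {
        struct work_struct work;
        struct request_queue *q;
};

static void example_run_queue_work(struct work_struct *work)
{
        struct example_deferred *d =
                container_of(work, struct example_deferred, work);

        /* process context: blk_run_queue() takes the queue lock itself */
        blk_run_queue(d->q);
}

static void example_defer_run(struct example_deferred *d,
                              struct request_queue *q)
{
        d->q = q;
        INIT_WORK(&d->work, example_run_queue_work);
        kblockd_schedule_work(q, &d->work);
}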
2152
2153int __init blk_dev_init(void)
2154{
2155        kblockd_workqueue = create_workqueue("kblockd");
2156        if (!kblockd_workqueue)
2157                panic("Failed to create kblockd\n");
2158
2159        request_cachep = kmem_cache_create("blkdev_requests",
2160                        sizeof(struct request), 0, SLAB_PANIC, NULL);
2161
2162        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2163                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2164
2165        return 0;
2166}
2167
2168