linux/block/blk-core.c
   1/*
   2 * Copyright (C) 1991, 1992 Linus Torvalds
   3 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   4 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   7 *      -  July 2000
   8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
   9 */
  10
  11/*
  12 * This handles all read/write requests to block devices
  13 */
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/backing-dev.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/highmem.h>
  20#include <linux/mm.h>
  21#include <linux/kernel_stat.h>
  22#include <linux/string.h>
  23#include <linux/init.h>
  24#include <linux/completion.h>
  25#include <linux/slab.h>
  26#include <linux/swap.h>
  27#include <linux/writeback.h>
  28#include <linux/task_io_accounting_ops.h>
  29#include <linux/fault-inject.h>
  30
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/block.h>
  33
  34#include "blk.h"
  35
  36EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
  37EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
  38EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
  39
  40static int __make_request(struct request_queue *q, struct bio *bio);
  41
  42/*
  43 * For the allocated request tables
  44 */
  45static struct kmem_cache *request_cachep;
  46
  47/*
  48 * For queue allocation
  49 */
  50struct kmem_cache *blk_requestq_cachep;
  51
  52/*
  53 * Controlling structure to kblockd
  54 */
  55static struct workqueue_struct *kblockd_workqueue;
  56
  57static void drive_stat_acct(struct request *rq, int new_io)
  58{
  59        struct hd_struct *part;
  60        int rw = rq_data_dir(rq);
  61        int cpu;
  62
  63        if (!blk_do_io_stat(rq))
  64                return;
  65
  66        cpu = part_stat_lock();
  67        part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
  68
  69        if (!new_io)
  70                part_stat_inc(cpu, part, merges[rw]);
  71        else {
  72                part_round_stats(cpu, part);
  73                part_inc_in_flight(part, rw);
  74        }
  75
  76        part_stat_unlock();
  77}
  78
  79void blk_queue_congestion_threshold(struct request_queue *q)
  80{
  81        int nr;
  82
  83        nr = q->nr_requests - (q->nr_requests / 8) + 1;
  84        if (nr > q->nr_requests)
  85                nr = q->nr_requests;
  86        q->nr_congestion_on = nr;
  87
  88        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  89        if (nr < 1)
  90                nr = 1;
  91        q->nr_congestion_off = nr;
  92}
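
/*
 * Worked example (illustrative, not part of the original source): with the
 * default nr_requests of 128, the queue is flagged congested once the
 * allocated request count approaches 128 - 128/8 + 1 = 113, and the flag
 * is cleared again once the count drops below 128 - 128/8 - 128/16 - 1 = 103,
 * giving some hysteresis between the two thresholds.
 */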
  93
  94/**
  95 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  96 * @bdev:       device
  97 *
  98 * Locates the passed device's request queue and returns the address of its
  99 * backing_dev_info
 100 *
 101 * Will return NULL if the request queue cannot be located.
 102 */
 103struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 104{
 105        struct backing_dev_info *ret = NULL;
 106        struct request_queue *q = bdev_get_queue(bdev);
 107
 108        if (q)
 109                ret = &q->backing_dev_info;
 110        return ret;
 111}
 112EXPORT_SYMBOL(blk_get_backing_dev_info);
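
/*
 * Illustrative usage sketch (not part of blk-core.c; the caller and the use
 * of ra_pages are assumptions). A caller holding an opened block device can
 * look up its backing_dev_info and, for example, read the readahead window,
 * remembering that NULL is returned when the request queue cannot be located:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		printk(KERN_DEBUG "ra_pages = %lu\n", bdi->ra_pages);
 */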
 113
 114void blk_rq_init(struct request_queue *q, struct request *rq)
 115{
 116        memset(rq, 0, sizeof(*rq));
 117
 118        INIT_LIST_HEAD(&rq->queuelist);
 119        INIT_LIST_HEAD(&rq->timeout_list);
 120        rq->cpu = -1;
 121        rq->q = q;
 122        rq->__sector = (sector_t) -1;
 123        INIT_HLIST_NODE(&rq->hash);
 124        RB_CLEAR_NODE(&rq->rb_node);
 125        rq->cmd = rq->__cmd;
 126        rq->cmd_len = BLK_MAX_CDB;
 127        rq->tag = -1;
 128        rq->ref_count = 1;
 129        rq->start_time = jiffies;
 130        set_start_time_ns(rq);
 131}
 132EXPORT_SYMBOL(blk_rq_init);
 133
 134static void req_bio_endio(struct request *rq, struct bio *bio,
 135                          unsigned int nbytes, int error)
 136{
 137        struct request_queue *q = rq->q;
 138
 139        if (&q->bar_rq != rq) {
 140                if (error)
 141                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
 142                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 143                        error = -EIO;
 144
 145                if (unlikely(nbytes > bio->bi_size)) {
 146                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 147                               __func__, nbytes, bio->bi_size);
 148                        nbytes = bio->bi_size;
 149                }
 150
 151                if (unlikely(rq->cmd_flags & REQ_QUIET))
 152                        set_bit(BIO_QUIET, &bio->bi_flags);
 153
 154                bio->bi_size -= nbytes;
 155                bio->bi_sector += (nbytes >> 9);
 156
 157                if (bio_integrity(bio))
 158                        bio_integrity_advance(bio, nbytes);
 159
 160                if (bio->bi_size == 0)
 161                        bio_endio(bio, error);
 162        } else {
 163
 164                /*
 165                 * Okay, this is the barrier request in progress, just
 166                 * record the error;
 167                 */
 168                if (error && !q->orderr)
 169                        q->orderr = error;
 170        }
 171}
 172
 173void blk_dump_rq_flags(struct request *rq, char *msg)
 174{
 175        int bit;
 176
 177        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 178                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 179                rq->cmd_flags);
 180
 181        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
 182               (unsigned long long)blk_rq_pos(rq),
 183               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 184        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 185               rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 186
 187        if (blk_pc_request(rq)) {
 188                printk(KERN_INFO "  cdb: ");
 189                for (bit = 0; bit < BLK_MAX_CDB; bit++)
 190                        printk("%02x ", rq->cmd[bit]);
 191                printk("\n");
 192        }
 193}
 194EXPORT_SYMBOL(blk_dump_rq_flags);
 195
 196/*
 197 * "plug" the device if there are no outstanding requests: this will
 198 * force the transfer to start only after we have put all the requests
 199 * on the list.
 200 *
 201 * This is called with interrupts off and no requests on the queue and
 202 * with the queue lock held.
 203 */
 204void blk_plug_device(struct request_queue *q)
 205{
 206        WARN_ON(!irqs_disabled());
 207
 208        /*
 209         * don't plug a stopped queue, it must be paired with blk_start_queue()
 210         * which will restart the queueing
 211         */
 212        if (blk_queue_stopped(q))
 213                return;
 214
 215        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 216                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 217                trace_block_plug(q);
 218        }
 219}
 220EXPORT_SYMBOL(blk_plug_device);
 221
 222/**
 223 * blk_plug_device_unlocked - plug a device without queue lock held
 224 * @q:    The &struct request_queue to plug
 225 *
 226 * Description:
 227 *   Like @blk_plug_device(), but grabs the queue lock and disables
 228 *   interrupts.
 229 **/
 230void blk_plug_device_unlocked(struct request_queue *q)
 231{
 232        unsigned long flags;
 233
 234        spin_lock_irqsave(q->queue_lock, flags);
 235        blk_plug_device(q);
 236        spin_unlock_irqrestore(q->queue_lock, flags);
 237}
 238EXPORT_SYMBOL(blk_plug_device_unlocked);
 239
 240/*
 241 * remove the queue from the plugged list, if present. called with
 242 * queue lock held and interrupts disabled.
 243 */
 244int blk_remove_plug(struct request_queue *q)
 245{
 246        WARN_ON(!irqs_disabled());
 247
 248        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 249                return 0;
 250
 251        del_timer(&q->unplug_timer);
 252        return 1;
 253}
 254EXPORT_SYMBOL(blk_remove_plug);
 255
 256/*
 257 * remove the plug and let it rip..
 258 */
 259void __generic_unplug_device(struct request_queue *q)
 260{
 261        if (unlikely(blk_queue_stopped(q)))
 262                return;
 263        if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
 264                return;
 265
 266        q->request_fn(q);
 267}
 268
 269/**
 270 * generic_unplug_device - fire a request queue
 271 * @q:    The &struct request_queue in question
 272 *
 273 * Description:
 274 *   Linux uses plugging to build bigger requests queues before letting
 275 *   the device have at them. If a queue is plugged, the I/O scheduler
 276 *   is still adding and merging requests on the queue. Once the queue
 277 *   gets unplugged, the request_fn defined for the queue is invoked and
 278 *   transfers started.
 279 **/
 280void generic_unplug_device(struct request_queue *q)
 281{
 282        if (blk_queue_plugged(q)) {
 283                spin_lock_irq(q->queue_lock);
 284                __generic_unplug_device(q);
 285                spin_unlock_irq(q->queue_lock);
 286        }
 287}
 288EXPORT_SYMBOL(generic_unplug_device);
 289
 290static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 291                                   struct page *page)
 292{
 293        struct request_queue *q = bdi->unplug_io_data;
 294
 295        blk_unplug(q);
 296}
 297
 298void blk_unplug_work(struct work_struct *work)
 299{
 300        struct request_queue *q =
 301                container_of(work, struct request_queue, unplug_work);
 302
 303        trace_block_unplug_io(q);
 304        q->unplug_fn(q);
 305}
 306
 307void blk_unplug_timeout(unsigned long data)
 308{
 309        struct request_queue *q = (struct request_queue *)data;
 310
 311        trace_block_unplug_timer(q);
 312        kblockd_schedule_work(q, &q->unplug_work);
 313}
 314
 315void blk_unplug(struct request_queue *q)
 316{
 317        /*
 318         * devices don't necessarily have an ->unplug_fn defined
 319         */
 320        if (q->unplug_fn) {
 321                trace_block_unplug_io(q);
 322                q->unplug_fn(q);
 323        }
 324}
 325EXPORT_SYMBOL(blk_unplug);
 326
 327/**
 328 * blk_start_queue - restart a previously stopped queue
 329 * @q:    The &struct request_queue in question
 330 *
 331 * Description:
 332 *   blk_start_queue() will clear the stop flag on the queue, and call
 333 *   the request_fn for the queue if it was in a stopped state when
 334 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 335 **/
 336void blk_start_queue(struct request_queue *q)
 337{
 338        WARN_ON(!irqs_disabled());
 339
 340        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 341        __blk_run_queue(q);
 342}
 343EXPORT_SYMBOL(blk_start_queue);
 344
 345/**
 346 * blk_stop_queue - stop a queue
 347 * @q:    The &struct request_queue in question
 348 *
 349 * Description:
 350 *   The Linux block layer assumes that a block driver will consume all
 351 *   entries on the request queue when the request_fn strategy is called.
 352 *   Often this will not happen, because of hardware limitations (queue
 353 *   depth settings). If a device driver gets a 'queue full' response,
 354 *   or if it simply chooses not to queue more I/O at one point, it can
 355 *   call this function to prevent the request_fn from being called until
 356 *   the driver has signalled it's ready to go again. This happens by calling
 357 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 358 **/
 359void blk_stop_queue(struct request_queue *q)
 360{
 361        blk_remove_plug(q);
 362        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 363}
 364EXPORT_SYMBOL(blk_stop_queue);
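
/*
 * Illustrative sketch of the stop/start pattern described above (hypothetical
 * driver; example_hw_full() and example_issue() are assumptions). The
 * request_fn is entered with the queue lock held, so blk_stop_queue() can be
 * called directly; the completion path later re-acquires the lock and
 * restarts the queue:
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (example_hw_full(q->queuedata)) {
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			blk_start_request(rq);
 *			example_issue(q->queuedata, rq);
 *		}
 *	}
 *
 * Later, from the completion handler, with q->queue_lock held and interrupts
 * disabled, the driver calls blk_start_queue(q) to resume dispatching.
 */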
 365
 366/**
 367 * blk_sync_queue - cancel any pending callbacks on a queue
 368 * @q: the queue
 369 *
 370 * Description:
 371 *     The block layer may perform asynchronous callback activity
 372 *     on a queue, such as calling the unplug function after a timeout.
 373 *     A block device may call blk_sync_queue to ensure that any
 374 *     such activity is cancelled, thus allowing it to release resources
 375 *     that the callbacks might use. The caller must already have made sure
 376 *     that its ->make_request_fn will not re-add plugging prior to calling
 377 *     this function.
 378 *
 379 */
 380void blk_sync_queue(struct request_queue *q)
 381{
 382        del_timer_sync(&q->unplug_timer);
 383        del_timer_sync(&q->timeout);
 384        cancel_work_sync(&q->unplug_work);
 385}
 386EXPORT_SYMBOL(blk_sync_queue);
 387
 388/**
 389 * __blk_run_queue - run a single device queue
 390 * @q:  The queue to run
 391 *
 392 * Description:
 393 *    See @blk_run_queue. This variant must be called with the queue lock
 394 *    held and interrupts disabled.
 395 *
 396 */
 397void __blk_run_queue(struct request_queue *q)
 398{
 399        blk_remove_plug(q);
 400
 401        if (unlikely(blk_queue_stopped(q)))
 402                return;
 403
 404        if (elv_queue_empty(q))
 405                return;
 406
 407        /*
 408         * Only recurse once to avoid overrunning the stack; let the unplug
 409         * handling reinvoke the handler shortly if we already got there.
 410         */
 411        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 412                q->request_fn(q);
 413                queue_flag_clear(QUEUE_FLAG_REENTER, q);
 414        } else {
 415                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
 416                kblockd_schedule_work(q, &q->unplug_work);
 417        }
 418}
 419EXPORT_SYMBOL(__blk_run_queue);
 420
 421/**
 422 * blk_run_queue - run a single device queue
 423 * @q: The queue to run
 424 *
 425 * Description:
 426 *    Invoke request handling on this queue, if it has pending work to do.
 427 *    May be used to restart queueing when a request has completed.
 428 */
 429void blk_run_queue(struct request_queue *q)
 430{
 431        unsigned long flags;
 432
 433        spin_lock_irqsave(q->queue_lock, flags);
 434        __blk_run_queue(q);
 435        spin_unlock_irqrestore(q->queue_lock, flags);
 436}
 437EXPORT_SYMBOL(blk_run_queue);
 438
 439void blk_put_queue(struct request_queue *q)
 440{
 441        kobject_put(&q->kobj);
 442}
 443
 444void blk_cleanup_queue(struct request_queue *q)
 445{
 446        /*
 447         * We know we have process context here, so we can be a little
 448         * cautious and ensure that pending block actions on this device
 449         * are done before moving on. Going into this function, we should
 450         * not have processes doing IO to this device.
 451         */
 452        blk_sync_queue(q);
 453
 454        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 455        mutex_lock(&q->sysfs_lock);
 456        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 457        mutex_unlock(&q->sysfs_lock);
 458
 459        if (q->elevator)
 460                elevator_exit(q->elevator);
 461
 462        blk_put_queue(q);
 463}
 464EXPORT_SYMBOL(blk_cleanup_queue);
 465
 466static int blk_init_free_list(struct request_queue *q)
 467{
 468        struct request_list *rl = &q->rq;
 469
 470        if (unlikely(rl->rq_pool))
 471                return 0;
 472
 473        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 474        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 475        rl->elvpriv = 0;
 476        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 477        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 478
 479        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 480                                mempool_free_slab, request_cachep, q->node);
 481
 482        if (!rl->rq_pool)
 483                return -ENOMEM;
 484
 485        return 0;
 486}
 487
 488struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 489{
 490        return blk_alloc_queue_node(gfp_mask, -1);
 491}
 492EXPORT_SYMBOL(blk_alloc_queue);
 493
 494struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 495{
 496        struct request_queue *q;
 497        int err;
 498
 499        q = kmem_cache_alloc_node(blk_requestq_cachep,
 500                                gfp_mask | __GFP_ZERO, node_id);
 501        if (!q)
 502                return NULL;
 503
 504        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 505        q->backing_dev_info.unplug_io_data = q;
 506        q->backing_dev_info.ra_pages =
 507                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 508        q->backing_dev_info.state = 0;
 509        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 510        q->backing_dev_info.name = "block";
 511
 512        err = bdi_init(&q->backing_dev_info);
 513        if (err) {
 514                kmem_cache_free(blk_requestq_cachep, q);
 515                return NULL;
 516        }
 517
 518        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 519                    laptop_mode_timer_fn, (unsigned long) q);
 520        init_timer(&q->unplug_timer);
 521        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 522        INIT_LIST_HEAD(&q->timeout_list);
 523        INIT_WORK(&q->unplug_work, blk_unplug_work);
 524
 525        kobject_init(&q->kobj, &blk_queue_ktype);
 526
 527        mutex_init(&q->sysfs_lock);
 528        spin_lock_init(&q->__queue_lock);
 529
 530        return q;
 531}
 532EXPORT_SYMBOL(blk_alloc_queue_node);
 533
 534/**
 535 * blk_init_queue  - prepare a request queue for use with a block device
 536 * @rfn:  The function to be called to process requests that have been
 537 *        placed on the queue.
 538 * @lock: Request queue spin lock
 539 *
 540 * Description:
 541 *    If a block device wishes to use the standard request handling procedures,
 542 *    which sorts requests and coalesces adjacent requests, then it must
 543 *    call blk_init_queue().  The function @rfn will be called when there
 544 *    are requests on the queue that need to be processed.  If the device
 545 *    supports plugging, then @rfn may not be called immediately when requests
 546 *    are available on the queue, but may be called at some time later instead.
 547 *    Plugged queues are generally unplugged when a buffer belonging to one
 548 *    of the requests on the queue is needed, or due to memory pressure.
 549 *
 550 *    @rfn is not required, or even expected, to remove all requests off the
 551 *    queue, but only as many as it can handle at a time.  If it does leave
 552 *    requests on the queue, it is responsible for arranging that the requests
 553 *    get dealt with eventually.
 554 *
 555 *    The queue spin lock must be held while manipulating the requests on the
 556 *    request queue; this lock will be taken also from interrupt context, so irq
 557 *    disabling is needed for it.
 558 *
 559 *    Function returns a pointer to the initialized request queue, or %NULL if
 560 *    it didn't succeed.
 561 *
 562 * Note:
 563 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 564 *    when the block device is deactivated (such as at module unload).
 565 **/
 566
 567struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 568{
 569        return blk_init_queue_node(rfn, lock, -1);
 570}
 571EXPORT_SYMBOL(blk_init_queue);
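
/*
 * Minimal setup/teardown sketch for the interface documented above
 * (hypothetical module; example_request_fn is assumed to exist elsewhere).
 * The driver supplies its request handler and a spinlock, and pairs the
 * call with blk_cleanup_queue() on unload:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	static struct request_queue *example_queue;
 *
 *	static int __init example_init(void)
 *	{
 *		example_queue = blk_init_queue(example_request_fn, &example_lock);
 *		if (!example_queue)
 *			return -ENOMEM;
 *		blk_queue_logical_block_size(example_queue, 512);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blk_cleanup_queue(example_queue);
 *	}
 */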
 572
 573struct request_queue *
 574blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 575{
 576        struct request_queue *uninit_q, *q;
 577
 578        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 579        if (!uninit_q)
 580                return NULL;
 581
 582        q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
 583        if (!q)
 584                blk_cleanup_queue(uninit_q);
 585
 586        return q;
 587}
 588EXPORT_SYMBOL(blk_init_queue_node);
 589
 590struct request_queue *
 591blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 592                         spinlock_t *lock)
 593{
 594        return blk_init_allocated_queue_node(q, rfn, lock, -1);
 595}
 596EXPORT_SYMBOL(blk_init_allocated_queue);
 597
 598struct request_queue *
 599blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 600                              spinlock_t *lock, int node_id)
 601{
 602        if (!q)
 603                return NULL;
 604
 605        q->node = node_id;
 606        if (blk_init_free_list(q))
 607                return NULL;
 608
 609        q->request_fn           = rfn;
 610        q->prep_rq_fn           = NULL;
 611        q->unplug_fn            = generic_unplug_device;
 612        q->queue_flags          = QUEUE_FLAG_DEFAULT;
 613        q->queue_lock           = lock;
 614
 615        /*
 616         * This also sets hw/phys segments, boundary and size
 617         */
 618        blk_queue_make_request(q, __make_request);
 619
 620        q->sg_reserved_size = INT_MAX;
 621
 622        /*
 623         * all done
 624         */
 625        if (!elevator_init(q, NULL)) {
 626                blk_queue_congestion_threshold(q);
 627                return q;
 628        }
 629
 630        return NULL;
 631}
 632EXPORT_SYMBOL(blk_init_allocated_queue_node);
 633
 634int blk_get_queue(struct request_queue *q)
 635{
 636        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 637                kobject_get(&q->kobj);
 638                return 0;
 639        }
 640
 641        return 1;
 642}
 643
 644static inline void blk_free_request(struct request_queue *q, struct request *rq)
 645{
 646        if (rq->cmd_flags & REQ_ELVPRIV)
 647                elv_put_request(q, rq);
 648        mempool_free(rq, q->rq.rq_pool);
 649}
 650
 651static struct request *
 652blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 653{
 654        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 655
 656        if (!rq)
 657                return NULL;
 658
 659        blk_rq_init(q, rq);
 660
 661        rq->cmd_flags = flags | REQ_ALLOCED;
 662
 663        if (priv) {
 664                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
 665                        mempool_free(rq, q->rq.rq_pool);
 666                        return NULL;
 667                }
 668                rq->cmd_flags |= REQ_ELVPRIV;
 669        }
 670
 671        return rq;
 672}
 673
 674/*
 675 * ioc_batching returns true if the ioc is valid, is currently batching requests and
 676 * should be given priority access to a request.
 677 */
 678static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 679{
 680        if (!ioc)
 681                return 0;
 682
 683        /*
 684         * Make sure the process is able to allocate at least 1 request
 685         * even if the batch times out, otherwise we could theoretically
 686         * lose wakeups.
 687         */
 688        return ioc->nr_batch_requests == q->nr_batching ||
 689                (ioc->nr_batch_requests > 0
 690                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 691}
 692
 693/*
 694 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 695 * will cause the process to be a "batcher" on all queues in the system. This
 696 * is the behaviour we want though - once it gets a wakeup it should be given
 697 * a nice run.
 698 */
 699static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 700{
 701        if (!ioc || ioc_batching(q, ioc))
 702                return;
 703
 704        ioc->nr_batch_requests = q->nr_batching;
 705        ioc->last_waited = jiffies;
 706}
 707
 708static void __freed_request(struct request_queue *q, int sync)
 709{
 710        struct request_list *rl = &q->rq;
 711
 712        if (rl->count[sync] < queue_congestion_off_threshold(q))
 713                blk_clear_queue_congested(q, sync);
 714
 715        if (rl->count[sync] + 1 <= q->nr_requests) {
 716                if (waitqueue_active(&rl->wait[sync]))
 717                        wake_up(&rl->wait[sync]);
 718
 719                blk_clear_queue_full(q, sync);
 720        }
 721}
 722
 723/*
 724 * A request has just been released.  Account for it, update the full and
 725 * congestion status, wake up any waiters.   Called under q->queue_lock.
 726 */
 727static void freed_request(struct request_queue *q, int sync, int priv)
 728{
 729        struct request_list *rl = &q->rq;
 730
 731        rl->count[sync]--;
 732        if (priv)
 733                rl->elvpriv--;
 734
 735        __freed_request(q, sync);
 736
 737        if (unlikely(rl->starved[sync ^ 1]))
 738                __freed_request(q, sync ^ 1);
 739}
 740
 741/*
 742 * Get a free request, queue_lock must be held.
 743 * Returns NULL on failure, with queue_lock held.
 744 * Returns !NULL on success, with queue_lock *not held*.
 745 */
 746static struct request *get_request(struct request_queue *q, int rw_flags,
 747                                   struct bio *bio, gfp_t gfp_mask)
 748{
 749        struct request *rq = NULL;
 750        struct request_list *rl = &q->rq;
 751        struct io_context *ioc = NULL;
 752        const bool is_sync = rw_is_sync(rw_flags) != 0;
 753        int may_queue, priv;
 754
 755        may_queue = elv_may_queue(q, rw_flags);
 756        if (may_queue == ELV_MQUEUE_NO)
 757                goto rq_starved;
 758
 759        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 760                if (rl->count[is_sync]+1 >= q->nr_requests) {
 761                        ioc = current_io_context(GFP_ATOMIC, q->node);
 762                        /*
 763                         * The queue will fill after this allocation, so set
 764                         * it as full, and mark this process as "batching".
 765                         * This process will be allowed to complete a batch of
 766                         * requests, others will be blocked.
 767                         */
 768                        if (!blk_queue_full(q, is_sync)) {
 769                                ioc_set_batching(q, ioc);
 770                                blk_set_queue_full(q, is_sync);
 771                        } else {
 772                                if (may_queue != ELV_MQUEUE_MUST
 773                                                && !ioc_batching(q, ioc)) {
 774                                        /*
 775                                         * The queue is full and the allocating
 776                                         * process is not a "batcher", and not
 777                                         * exempted by the IO scheduler
 778                                         */
 779                                        goto out;
 780                                }
 781                        }
 782                }
 783                blk_set_queue_congested(q, is_sync);
 784        }
 785
 786        /*
 787         * Only allow batching queuers to allocate up to 50% over the defined
 788         * limit of requests, otherwise we could have thousands of requests
 789         * allocated with any setting of ->nr_requests
 790         */
 791        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 792                goto out;
 793
 794        rl->count[is_sync]++;
 795        rl->starved[is_sync] = 0;
 796
 797        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 798        if (priv)
 799                rl->elvpriv++;
 800
 801        if (blk_queue_io_stat(q))
 802                rw_flags |= REQ_IO_STAT;
 803        spin_unlock_irq(q->queue_lock);
 804
 805        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 806        if (unlikely(!rq)) {
 807                /*
 808                 * Allocation failed presumably due to memory. Undo anything
 809                 * we might have messed up.
 810                 *
 811                 * Allocating task should really be put onto the front of the
 812                 * wait queue, but this is pretty rare.
 813                 */
 814                spin_lock_irq(q->queue_lock);
 815                freed_request(q, is_sync, priv);
 816
 817                /*
 818                 * in the very unlikely event that allocation failed and no
 819                 * requests for this direction were pending, mark us starved
 820                 * so that freeing of a request in the other direction will
 821                 * notice us. another possible fix would be to split the
 822                 * rq mempool into READ and WRITE
 823                 */
 824rq_starved:
 825                if (unlikely(rl->count[is_sync] == 0))
 826                        rl->starved[is_sync] = 1;
 827
 828                goto out;
 829        }
 830
 831        /*
 832         * ioc may be NULL here, and ioc_batching will be false. That's
 833         * OK, if the queue is under the request limit then requests need
 834         * not count toward the nr_batch_requests limit. There will always
 835         * be some limit enforced by BLK_BATCH_TIME.
 836         */
 837        if (ioc_batching(q, ioc))
 838                ioc->nr_batch_requests--;
 839
 840        trace_block_getrq(q, bio, rw_flags & 1);
 841out:
 842        return rq;
 843}
 844
 845/*
 846 * No available requests for this queue, unplug the device and wait for some
 847 * requests to become available.
 848 *
 849 * Called with q->queue_lock held, and returns with it unlocked.
 850 */
 851static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 852                                        struct bio *bio)
 853{
 854        const bool is_sync = rw_is_sync(rw_flags) != 0;
 855        struct request *rq;
 856
 857        rq = get_request(q, rw_flags, bio, GFP_NOIO);
 858        while (!rq) {
 859                DEFINE_WAIT(wait);
 860                struct io_context *ioc;
 861                struct request_list *rl = &q->rq;
 862
 863                prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 864                                TASK_UNINTERRUPTIBLE);
 865
 866                trace_block_sleeprq(q, bio, rw_flags & 1);
 867
 868                __generic_unplug_device(q);
 869                spin_unlock_irq(q->queue_lock);
 870                io_schedule();
 871
 872                /*
 873                 * After sleeping, we become a "batching" process and
 874                 * will be able to allocate at least one request, and
 875                 * up to a big batch of them for a small period of time.
 876                 * See ioc_batching, ioc_set_batching
 877                 */
 878                ioc = current_io_context(GFP_NOIO, q->node);
 879                ioc_set_batching(q, ioc);
 880
 881                spin_lock_irq(q->queue_lock);
 882                finish_wait(&rl->wait[is_sync], &wait);
 883
 884                rq = get_request(q, rw_flags, bio, GFP_NOIO);
 885        };
 886
 887        return rq;
 888}
 889
 890struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 891{
 892        struct request *rq;
 893
 894        BUG_ON(rw != READ && rw != WRITE);
 895
 896        spin_lock_irq(q->queue_lock);
 897        if (gfp_mask & __GFP_WAIT) {
 898                rq = get_request_wait(q, rw, NULL);
 899        } else {
 900                rq = get_request(q, rw, NULL, gfp_mask);
 901                if (!rq)
 902                        spin_unlock_irq(q->queue_lock);
 903        }
 904        /* q->queue_lock is unlocked at this point */
 905
 906        return rq;
 907}
 908EXPORT_SYMBOL(blk_get_request);
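
/*
 * Illustrative sketch (hypothetical caller): blk_get_request() is the usual
 * way to build a passthrough (BLOCK_PC) command outside of ->make_request_fn.
 * With __GFP_WAIT in the mask the call may sleep; without it, a NULL return
 * must be handled:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->timeout = 60 * HZ;
 *	... fill in rq->cmd[] and map data with blk_rq_map_kern() ...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */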
 909
 910/**
 911 * blk_make_request - given a bio, allocate a corresponding struct request.
 912 * @q: target request queue
 913 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 914 *        It may be a chained bio properly constructed by the block/bio layer.
 915 * @gfp_mask: gfp flags to be used for memory allocation
 916 *
 917 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 918 * type commands, where the struct request needs to be further initialized by
 919 * the caller. It is passed a &struct bio, which describes the memory info of
 920 * the I/O transfer.
 921 *
 922 * The caller of blk_make_request must make sure that bi_io_vec
 923 * is set to describe the memory buffers, and that bio_data_dir() will return
 924 * the needed direction of the request (and that all bios in the passed
 925 * bio-chain are set up accordingly).
 926 *
 927 * If called under non-sleepable conditions, the mapped bio buffers must not
 928 * need bouncing (allocate them with the appropriate mask or flags, suitable
 929 * for the target device); otherwise the call to blk_queue_bounce will
 930 * BUG.
 931 *
 932 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 933 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 934 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 935 * completion of a bio that hasn't been submitted yet, thus resulting in a
 936 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 937 * of bio_alloc(), as that avoids the mempool deadlock.
 938 * If possible a big IO should be split into smaller parts when allocation
 939 * fails. Partial allocation should not be an error, or you risk a live-lock.
 940 */
 941struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 942                                 gfp_t gfp_mask)
 943{
 944        struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 945
 946        if (unlikely(!rq))
 947                return ERR_PTR(-ENOMEM);
 948
 949        for_each_bio(bio) {
 950                struct bio *bounce_bio = bio;
 951                int ret;
 952
 953                blk_queue_bounce(q, &bounce_bio);
 954                ret = blk_rq_append_bio(q, rq, bounce_bio);
 955                if (unlikely(ret)) {
 956                        blk_put_request(rq);
 957                        return ERR_PTR(ret);
 958                }
 959        }
 960
 961        return rq;
 962}
 963EXPORT_SYMBOL(blk_make_request);
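
/*
 * Sketch of the calling convention documented above (hypothetical caller;
 * the bio chain is assumed to have been built already, e.g. with
 * bio_kmalloc() as the comment recommends):
 *
 *	struct request *rq = blk_make_request(q, bio, GFP_KERNEL);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */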
 964
 965/**
 966 * blk_requeue_request - put a request back on queue
 967 * @q:          request queue where request should be inserted
 968 * @rq:         request to be inserted
 969 *
 970 * Description:
 971 *    Drivers often keep queueing requests until the hardware cannot accept
 972 *    more, when that condition happens we need to put the request back
 973 *    on the queue. Must be called with queue lock held.
 974 */
 975void blk_requeue_request(struct request_queue *q, struct request *rq)
 976{
 977        blk_delete_timer(rq);
 978        blk_clear_rq_complete(rq);
 979        trace_block_rq_requeue(q, rq);
 980
 981        if (blk_rq_tagged(rq))
 982                blk_queue_end_tag(q, rq);
 983
 984        BUG_ON(blk_queued_rq(rq));
 985
 986        elv_requeue_request(q, rq);
 987}
 988EXPORT_SYMBOL(blk_requeue_request);
 989
 990/**
 991 * blk_insert_request - insert a special request into a request queue
 992 * @q:          request queue where request should be inserted
 993 * @rq:         request to be inserted
 994 * @at_head:    insert request at head or tail of queue
 995 * @data:       private data
 996 *
 997 * Description:
 998 *    Many block devices need to execute commands asynchronously, so they don't
 999 *    block the whole kernel from preemption during request execution.  This is
1000 *    accomplished normally by inserting artificial requests tagged as
1001 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
1002 *    be scheduled for actual execution by the request queue.
1003 *
1004 *    We have the option of inserting the head or the tail of the queue.
1005 *    Typically we use the tail for new ioctls and so forth.  We use the head
1006 *    of the queue for things like a QUEUE_FULL message from a device, or a
1007 *    host that is unable to accept a particular command.
1008 */
1009void blk_insert_request(struct request_queue *q, struct request *rq,
1010                        int at_head, void *data)
1011{
1012        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
1013        unsigned long flags;
1014
1015        /*
1016         * tell I/O scheduler that this isn't a regular read/write (ie it
1017         * must not attempt merges on this) and that it acts as a soft
1018         * barrier
1019         */
1020        rq->cmd_type = REQ_TYPE_SPECIAL;
1021
1022        rq->special = data;
1023
1024        spin_lock_irqsave(q->queue_lock, flags);
1025
1026        /*
1027         * If command is tagged, release the tag
1028         */
1029        if (blk_rq_tagged(rq))
1030                blk_queue_end_tag(q, rq);
1031
1032        drive_stat_acct(rq, 1);
1033        __elv_add_request(q, rq, where, 0);
1034        __blk_run_queue(q);
1035        spin_unlock_irqrestore(q->queue_lock, flags);
1036}
1037EXPORT_SYMBOL(blk_insert_request);
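
/*
 * Illustrative sketch (hypothetical driver; example_done_fn and
 * example_private_data are assumptions). A driver pushing an internally
 * generated command ahead of normal I/O might do:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_ATOMIC);
 *
 *	if (rq) {
 *		rq->timeout = 5 * HZ;
 *		rq->end_io = example_done_fn;
 *		blk_insert_request(q, rq, 1, example_private_data);
 *	}
 */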
1038
1039/*
1040 * add-request adds a request to the linked list.
1041 * queue lock is held and interrupts disabled, as we muck with the
1042 * request queue list.
1043 */
1044static inline void add_request(struct request_queue *q, struct request *req)
1045{
1046        drive_stat_acct(req, 1);
1047
1048        /*
1049         * elevator indicated where it wants this request to be
1050         * inserted at elevator_merge time
1051         */
1052        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
1053}
1054
1055static void part_round_stats_single(int cpu, struct hd_struct *part,
1056                                    unsigned long now)
1057{
1058        if (now == part->stamp)
1059                return;
1060
1061        if (part_in_flight(part)) {
1062                __part_stat_add(cpu, part, time_in_queue,
1063                                part_in_flight(part) * (now - part->stamp));
1064                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1065        }
1066        part->stamp = now;
1067}
1068
1069/**
1070 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1071 * @cpu: cpu number for stats access
1072 * @part: target partition
1073 *
1074 * The average IO queue length and utilisation statistics are maintained
1075 * by observing the current state of the queue length and the amount of
1076 * time it has been in this state for.
1077 *
1078 * Normally, that accounting is done on IO completion, but that can result
1079 * in more than a second's worth of IO being accounted for within any one
1080 * second, leading to >100% utilisation.  To deal with that, we call this
1081 * function to do a round-off before returning the results when reading
1082 * /proc/diskstats.  This accounts immediately for all queue usage up to
1083 * the current jiffies and restarts the counters again.
1084 */
1085void part_round_stats(int cpu, struct hd_struct *part)
1086{
1087        unsigned long now = jiffies;
1088
1089        if (part->partno)
1090                part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1091        part_round_stats_single(cpu, part, now);
1092}
1093EXPORT_SYMBOL_GPL(part_round_stats);
1094
1095/*
1096 * queue lock must be held
1097 */
1098void __blk_put_request(struct request_queue *q, struct request *req)
1099{
1100        if (unlikely(!q))
1101                return;
1102        if (unlikely(--req->ref_count))
1103                return;
1104
1105        elv_completed_request(q, req);
1106
1107        /* this is a bio leak */
1108        WARN_ON(req->bio != NULL);
1109
1110        /*
1111         * Request may not have originated from ll_rw_blk. if not,
1112         * it didn't come out of our reserved rq pools
1113         */
1114        if (req->cmd_flags & REQ_ALLOCED) {
1115                int is_sync = rq_is_sync(req) != 0;
1116                int priv = req->cmd_flags & REQ_ELVPRIV;
1117
1118                BUG_ON(!list_empty(&req->queuelist));
1119                BUG_ON(!hlist_unhashed(&req->hash));
1120
1121                blk_free_request(q, req);
1122                freed_request(q, is_sync, priv);
1123        }
1124}
1125EXPORT_SYMBOL_GPL(__blk_put_request);
1126
1127void blk_put_request(struct request *req)
1128{
1129        unsigned long flags;
1130        struct request_queue *q = req->q;
1131
1132        spin_lock_irqsave(q->queue_lock, flags);
1133        __blk_put_request(q, req);
1134        spin_unlock_irqrestore(q->queue_lock, flags);
1135}
1136EXPORT_SYMBOL(blk_put_request);
1137
1138void init_request_from_bio(struct request *req, struct bio *bio)
1139{
1140        req->cpu = bio->bi_comp_cpu;
1141        req->cmd_type = REQ_TYPE_FS;
1142
1143        /*
1144         * Inherit FAILFAST from bio (for read-ahead, and explicit
1145         * FAILFAST).  FAILFAST flags are identical for req and bio.
1146         */
1147        if (bio_rw_flagged(bio, BIO_RW_AHEAD))
1148                req->cmd_flags |= REQ_FAILFAST_MASK;
1149        else
1150                req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
1151
1152        if (bio_rw_flagged(bio, BIO_RW_DISCARD))
1153                req->cmd_flags |= REQ_DISCARD;
1154        if (bio_rw_flagged(bio, BIO_RW_BARRIER))
1155                req->cmd_flags |= REQ_HARDBARRIER;
1156        if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
1157                req->cmd_flags |= REQ_RW_SYNC;
1158        if (bio_rw_flagged(bio, BIO_RW_META))
1159                req->cmd_flags |= REQ_RW_META;
1160        if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
1161                req->cmd_flags |= REQ_NOIDLE;
1162
1163        req->errors = 0;
1164        req->__sector = bio->bi_sector;
1165        req->ioprio = bio_prio(bio);
1166        blk_rq_bio_prep(req->q, req, bio);
1167}
1168
1169/*
1170 * Only disable plugging for non-rotational devices if they do tagging
1171 * as well; otherwise we still need plugging for proper merging
1172 */
1173static inline bool queue_should_plug(struct request_queue *q)
1174{
1175        return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
1176}
1177
1178static int __make_request(struct request_queue *q, struct bio *bio)
1179{
1180        struct request *req;
1181        int el_ret;
1182        unsigned int bytes = bio->bi_size;
1183        const unsigned short prio = bio_prio(bio);
1184        const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
1185        const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
1186        const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1187        int rw_flags;
1188
1189        if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
1190            (q->next_ordered == QUEUE_ORDERED_NONE)) {
1191                bio_endio(bio, -EOPNOTSUPP);
1192                return 0;
1193        }
1194        /*
1195         * low level driver can indicate that it wants pages above a
1196         * certain limit bounced to low memory (ie for highmem, or even
1197         * ISA dma in theory)
1198         */
1199        blk_queue_bounce(q, &bio);
1200
1201        spin_lock_irq(q->queue_lock);
1202
1203        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
1204                goto get_rq;
1205
1206        el_ret = elv_merge(q, &req, bio);
1207        switch (el_ret) {
1208        case ELEVATOR_BACK_MERGE:
1209                BUG_ON(!rq_mergeable(req));
1210
1211                if (!ll_back_merge_fn(q, req, bio))
1212                        break;
1213
1214                trace_block_bio_backmerge(q, bio);
1215
1216                if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1217                        blk_rq_set_mixed_merge(req);
1218
1219                req->biotail->bi_next = bio;
1220                req->biotail = bio;
1221                req->__data_len += bytes;
1222                req->ioprio = ioprio_best(req->ioprio, prio);
1223                if (!blk_rq_cpu_valid(req))
1224                        req->cpu = bio->bi_comp_cpu;
1225                drive_stat_acct(req, 0);
1226                elv_bio_merged(q, req, bio);
1227                if (!attempt_back_merge(q, req))
1228                        elv_merged_request(q, req, el_ret);
1229                goto out;
1230
1231        case ELEVATOR_FRONT_MERGE:
1232                BUG_ON(!rq_mergeable(req));
1233
1234                if (!ll_front_merge_fn(q, req, bio))
1235                        break;
1236
1237                trace_block_bio_frontmerge(q, bio);
1238
1239                if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
1240                        blk_rq_set_mixed_merge(req);
1241                        req->cmd_flags &= ~REQ_FAILFAST_MASK;
1242                        req->cmd_flags |= ff;
1243                }
1244
1245                bio->bi_next = req->bio;
1246                req->bio = bio;
1247
1248                /*
1249                 * may not be valid. if the low level driver said
1250                 * it didn't need a bounce buffer then it better
1251                 * not touch req->buffer either...
1252                 */
1253                req->buffer = bio_data(bio);
1254                req->__sector = bio->bi_sector;
1255                req->__data_len += bytes;
1256                req->ioprio = ioprio_best(req->ioprio, prio);
1257                if (!blk_rq_cpu_valid(req))
1258                        req->cpu = bio->bi_comp_cpu;
1259                drive_stat_acct(req, 0);
1260                elv_bio_merged(q, req, bio);
1261                if (!attempt_front_merge(q, req))
1262                        elv_merged_request(q, req, el_ret);
1263                goto out;
1264
1265        /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1266        default:
1267                ;
1268        }
1269
1270get_rq:
1271        /*
1272         * This sync check and mask will be re-done in init_request_from_bio(),
1273         * but we need to set it earlier to expose the sync flag to the
1274         * rq allocator and io schedulers.
1275         */
1276        rw_flags = bio_data_dir(bio);
1277        if (sync)
1278                rw_flags |= REQ_RW_SYNC;
1279
1280        /*
1281         * Grab a free request. This might sleep but cannot fail.
1282         * Returns with the queue unlocked.
1283         */
1284        req = get_request_wait(q, rw_flags, bio);
1285
1286        /*
1287         * After dropping the lock and possibly sleeping here, our request
1288         * may now be mergeable after it had proven unmergeable (above).
1289         * We don't worry about that case for efficiency. It won't happen
1290         * often, and the elevators are able to handle it.
1291         */
1292        init_request_from_bio(req, bio);
1293
1294        spin_lock_irq(q->queue_lock);
1295        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
1296            bio_flagged(bio, BIO_CPU_AFFINE))
1297                req->cpu = blk_cpu_to_group(smp_processor_id());
1298        if (queue_should_plug(q) && elv_queue_empty(q))
1299                blk_plug_device(q);
1300        add_request(q, req);
1301out:
1302        if (unplug || !queue_should_plug(q))
1303                __generic_unplug_device(q);
1304        spin_unlock_irq(q->queue_lock);
1305        return 0;
1306}
1307
1308/*
1309 * If bio->bi_bdev is a partition, remap the location
1310 */
1311static inline void blk_partition_remap(struct bio *bio)
1312{
1313        struct block_device *bdev = bio->bi_bdev;
1314
1315        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1316                struct hd_struct *p = bdev->bd_part;
1317
1318                bio->bi_sector += p->start_sect;
1319                bio->bi_bdev = bdev->bd_contains;
1320
1321                trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
1322                                    bdev->bd_dev,
1323                                    bio->bi_sector - p->start_sect);
1324        }
1325}
1326
1327static void handle_bad_sector(struct bio *bio)
1328{
1329        char b[BDEVNAME_SIZE];
1330
1331        printk(KERN_INFO "attempt to access beyond end of device\n");
1332        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1333                        bdevname(bio->bi_bdev, b),
1334                        bio->bi_rw,
1335                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
1336                        (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1337
1338        set_bit(BIO_EOF, &bio->bi_flags);
1339}
1340
1341#ifdef CONFIG_FAIL_MAKE_REQUEST
1342
1343static DECLARE_FAULT_ATTR(fail_make_request);
1344
1345static int __init setup_fail_make_request(char *str)
1346{
1347        return setup_fault_attr(&fail_make_request, str);
1348}
1349__setup("fail_make_request=", setup_fail_make_request);
1350
1351static int should_fail_request(struct bio *bio)
1352{
1353        struct hd_struct *part = bio->bi_bdev->bd_part;
1354
1355        if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
1356                return should_fail(&fail_make_request, bio->bi_size);
1357
1358        return 0;
1359}
1360
1361static int __init fail_make_request_debugfs(void)
1362{
1363        return init_fault_attr_dentries(&fail_make_request,
1364                                        "fail_make_request");
1365}
1366
1367late_initcall(fail_make_request_debugfs);
1368
1369#else /* CONFIG_FAIL_MAKE_REQUEST */
1370
1371static inline int should_fail_request(struct bio *bio)
1372{
1373        return 0;
1374}
1375
1376#endif /* CONFIG_FAIL_MAKE_REQUEST */
1377
1378/*
1379 * Check whether this bio extends beyond the end of the device.
1380 */
1381static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1382{
1383        sector_t maxsector;
1384
1385        if (!nr_sectors)
1386                return 0;
1387
1388        /* Test device or partition size, when known. */
1389        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1390        if (maxsector) {
1391                sector_t sector = bio->bi_sector;
1392
1393                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1394                        /*
1395                         * This may well happen - the kernel calls bread()
1396                         * without checking the size of the device, e.g., when
1397                         * mounting a device.
1398                         */
1399                        handle_bad_sector(bio);
1400                        return 1;
1401                }
1402        }
1403
1404        return 0;
1405}
1406
1407/**
1408 * generic_make_request - hand a buffer to its device driver for I/O
1409 * @bio:  The bio describing the location in memory and on the device.
1410 *
1411 * generic_make_request() is used to make I/O requests of block
1412 * devices. It is passed a &struct bio, which describes the I/O that needs
1413 * to be done.
1414 *
1415 * generic_make_request() does not return any status.  The
1416 * success/failure status of the request, along with notification of
1417 * completion, is delivered asynchronously through the bio->bi_end_io
1418 * function described (one day) elsewhere.
1419 *
1420 * The caller of generic_make_request must make sure that bi_io_vec
1421 * is set to describe the memory buffer, and that bi_bdev and bi_sector are
1422 * set to describe the device address, and the
1423 * bi_end_io and optionally bi_private are set to describe how
1424 * completion notification should be signaled.
1425 *
1426 * generic_make_request and the drivers it calls may use bi_next if this
1427 * bio happens to be merged with someone else, and may change bi_bdev and
1428 * bi_sector for remaps as it sees fit.  So the values of these fields
1429 * should NOT be depended on after the call to generic_make_request.
1430 */
1431static inline void __generic_make_request(struct bio *bio)
1432{
1433        struct request_queue *q;
1434        sector_t old_sector;
1435        int ret, nr_sectors = bio_sectors(bio);
1436        dev_t old_dev;
1437        int err = -EIO;
1438
1439        might_sleep();
1440
1441        if (bio_check_eod(bio, nr_sectors))
1442                goto end_io;
1443
1444        /*
1445         * Resolve the mapping until finished. (drivers are
1446         * still free to implement/resolve their own stacking
1447         * by explicitly returning 0)
1448         *
1449         * NOTE: we don't repeat the blk_size check for each new device.
1450         * Stacking drivers are expected to know what they are doing.
1451         */
1452        old_sector = -1;
1453        old_dev = 0;
1454        do {
1455                char b[BDEVNAME_SIZE];
1456
1457                q = bdev_get_queue(bio->bi_bdev);
1458                if (unlikely(!q)) {
1459                        printk(KERN_ERR
1460                               "generic_make_request: Trying to access "
1461                                "nonexistent block-device %s (%Lu)\n",
1462                                bdevname(bio->bi_bdev, b),
1463                                (long long) bio->bi_sector);
1464                        goto end_io;
1465                }
1466
1467                if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1468                             nr_sectors > queue_max_hw_sectors(q))) {
1469                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1470                               bdevname(bio->bi_bdev, b),
1471                               bio_sectors(bio),
1472                               queue_max_hw_sectors(q));
1473                        goto end_io;
1474                }
1475
1476                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1477                        goto end_io;
1478
1479                if (should_fail_request(bio))
1480                        goto end_io;
1481
1482                /*
1483                 * If this device has partitions, remap block n
1484                 * of partition p to block n+start(p) of the disk.
1485                 */
1486                blk_partition_remap(bio);
1487
1488                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1489                        goto end_io;
1490
1491                if (old_sector != -1)
1492                        trace_block_remap(q, bio, old_dev, old_sector);
1493
1494                old_sector = bio->bi_sector;
1495                old_dev = bio->bi_bdev->bd_dev;
1496
1497                if (bio_check_eod(bio, nr_sectors))
1498                        goto end_io;
1499
1500                if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1501                    !blk_queue_discard(q)) {
1502                        err = -EOPNOTSUPP;
1503                        goto end_io;
1504                }
1505
1506                trace_block_bio_queue(q, bio);
1507
1508                ret = q->make_request_fn(q, bio);
1509        } while (ret);
1510
1511        return;
1512
1513end_io:
1514        bio_endio(bio, err);
1515}
1516
1517/*
1518 * We only want one ->make_request_fn to be active at a time,
1519 * else stack usage with stacked devices could be a problem.
1520 * So use current->bio_list to keep a list of requests
1521 * submitted by a make_request_fn function.
1522 * current->bio_list is also used as a flag to say if
1523 * generic_make_request is currently active in this task or not.
1524 * If it is NULL, then no make_request is active.  If it is non-NULL,
1525 * then a make_request is active, and new requests should be added
1526 * at the tail
1527 */
1528void generic_make_request(struct bio *bio)
1529{
1530        struct bio_list bio_list_on_stack;
1531
1532        if (current->bio_list) {
1533                /* make_request is active */
1534                bio_list_add(current->bio_list, bio);
1535                return;
1536        }
1537        /* following loop may be a bit non-obvious, and so deserves some
1538         * explanation.
1539         * Before entering the loop, bio->bi_next is NULL (as all callers
1540         * ensure that) so we have a list with a single bio.
1541         * We pretend that we have just taken it off a longer list, so
1542         * we point current->bio_list at bio_list_on_stack,
1543         * thus initialising the list of new bios to be
1544         * added.  __generic_make_request may indeed add some more bios
1545         * through a recursive call to generic_make_request.  If it
1546         * did, we find a non-empty bio_list_on_stack and re-enter the loop
1547         * from the top.  In this case we really did just take the bio
1548         * off the top of the list (no pretending) and so remove it from
1549         * the list, and call into __generic_make_request again.
1550         *
1551         * The loop was structured like this to make only one call to
1552         * __generic_make_request (which is important as it is large and
1553         * inlined) and to keep the structure simple.
1554         */
1555        BUG_ON(bio->bi_next);
1556        bio_list_init(&bio_list_on_stack);
1557        current->bio_list = &bio_list_on_stack;
1558        do {
1559                __generic_make_request(bio);
1560                bio = bio_list_pop(current->bio_list);
1561        } while (bio);
1562        current->bio_list = NULL; /* deactivate */
1563}
1564EXPORT_SYMBOL(generic_make_request);
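
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical remapping driver's make_request_fn that redirects each bio to
 * a backing device and resubmits it with generic_make_request().  Because of
 * the current->bio_list handling above, the recursive call merely queues the
 * bio; it is actually issued when the outer generic_make_request() loops
 * again.  "struct example_remap_dev" and its fields are assumptions made up
 * for this sketch.
 */
struct example_remap_dev {                      /* hypothetical driver state */
        struct block_device *backing_bdev;
        sector_t start_sector;
};

static int example_remap_make_request(struct request_queue *q, struct bio *bio)
{
        struct example_remap_dev *dev = q->queuedata;

        /* redirect the bio to the backing device, shifted by a fixed offset */
        bio->bi_bdev = dev->backing_bdev;
        bio->bi_sector += dev->start_sector;

        generic_make_request(bio);
        return 0;
}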
1565
1566/**
1567 * submit_bio - submit a bio to the block device layer for I/O
1568 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1569 * @bio: The &struct bio which describes the I/O
1570 *
1571 * submit_bio() is very similar in purpose to generic_make_request(), and
1572 * uses that function to do most of the work. Both are fairly rough
1573 * interfaces; @bio must be set up and ready for I/O.
1574 *
1575 */
1576void submit_bio(int rw, struct bio *bio)
1577{
1578        int count = bio_sectors(bio);
1579
1580        bio->bi_rw |= rw;
1581
1582        /*
1583         * If it's a regular read/write or a barrier with data attached,
1584         * go through the normal accounting stuff before submission.
1585         */
1586        if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
1587                if (rw & WRITE) {
1588                        count_vm_events(PGPGOUT, count);
1589                } else {
1590                        task_io_account_read(bio->bi_size);
1591                        count_vm_events(PGPGIN, count);
1592                }
1593
1594                if (unlikely(block_dump)) {
1595                        char b[BDEVNAME_SIZE];
1596                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1597                                current->comm, task_pid_nr(current),
1598                                (rw & WRITE) ? "WRITE" : "READ",
1599                                (unsigned long long)bio->bi_sector,
1600                                bdevname(bio->bi_bdev, b));
1601                }
1602        }
1603
1604        generic_make_request(bio);
1605}
1606EXPORT_SYMBOL(submit_bio);
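
/*
 * Usage sketch (illustrative only, not part of the original file): reading a
 * single page synchronously with submit_bio().  The completion callback, the
 * helper name and the lack of error handling are simplifications for the
 * sketch; a real caller would also check bio_add_page() and propagate the
 * error reported to bi_end_io.
 */
static void example_read_end_io(struct bio *bio, int error)
{
        struct completion *done = bio->bi_private;

        complete(done);
        bio_put(bio);
}

static int example_read_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = example_read_end_io;
        bio->bi_private = &done;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        submit_bio(READ, bio);
        wait_for_completion(&done);
        return 0;
}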
1607
1608/**
1609 * blk_rq_check_limits - Helper function to check a request against queue limits
1610 * @q:  the queue
1611 * @rq: the request being checked
1612 *
1613 * Description:
1614 *    @rq may have been made based on weaker limitations of upper-level queues
1615 *    in request stacking drivers, and it may violate the limitation of @q.
1616 *    Since the block layer and the underlying device driver trust @rq
1617 *    after it is inserted to @q, it should be checked against @q before
1618 *    the insertion using this generic function.
1619 *
1620 *    This function should also be useful for request stacking drivers
1621 *    in some cases below, so export this function.
1622 *    Request stacking drivers like request-based dm may change the queue
1623 *    limits while requests are in the queue (e.g. dm's table swapping).
1624 *    Such request stacking drivers should check those requests against
1625 *    the new queue limits again when they dispatch those requests,
1626 *    although such checks are also done against the old queue limits
1627 *    when submitting requests.
1628 */
1629int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1630{
1631        if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1632            blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1633                printk(KERN_ERR "%s: over max size limit.\n", __func__);
1634                return -EIO;
1635        }
1636
1637        /*
1638         * queue's settings related to segment counting like q->bounce_pfn
1639         * may differ from that of other stacking queues.
1640         * Recalculate it to check the request correctly on this queue's
1641         * limitation.
1642         */
1643        blk_recalc_rq_segments(rq);
1644        if (rq->nr_phys_segments > queue_max_segments(q)) {
1645                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1646                return -EIO;
1647        }
1648
1649        return 0;
1650}
1651EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1652
1653/**
1654 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1655 * @q:  the queue to submit the request
1656 * @rq: the request being queued
1657 */
1658int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1659{
1660        unsigned long flags;
1661
1662        if (blk_rq_check_limits(q, rq))
1663                return -EIO;
1664
1665#ifdef CONFIG_FAIL_MAKE_REQUEST
1666        if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
1667            should_fail(&fail_make_request, blk_rq_bytes(rq)))
1668                return -EIO;
1669#endif
1670
1671        spin_lock_irqsave(q->queue_lock, flags);
1672
1673        /*
1674         * The request being submitted must already be dequeued before calling
1675         * this function, because it will be linked to another request_queue (@q)
1676         */
1677        BUG_ON(blk_queued_rq(rq));
1678
1679        drive_stat_acct(rq, 1);
1680        __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1681
1682        spin_unlock_irqrestore(q->queue_lock, flags);
1683
1684        return 0;
1685}
1686EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
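
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * dispatch step of a hypothetical request-stacking driver.  The original
 * request is re-checked against the stacking queue's current limits (they
 * may have shrunk since it was queued, e.g. after a table swap) and the
 * prepared clone is then handed to the underlying queue.  Unpreparing the
 * clone and failing or requeueing the original on error is left to the
 * caller in this sketch.
 */
static int example_stacking_dispatch(struct request_queue *stack_q,
                                     struct request *orig,
                                     struct request_queue *below_q,
                                     struct request *clone)
{
        if (blk_rq_check_limits(stack_q, orig))
                return -EIO;    /* limits shrank while orig sat in stack_q */

        return blk_insert_cloned_request(below_q, clone);
}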
1687
1688/**
1689 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1690 * @rq: request to examine
1691 *
1692 * Description:
1693 *     A request could be a merge of IOs which require different failure
1694 *     handling.  This function determines the number of bytes which
1695 *     can be failed from the beginning of the request without
1696 *     crossing into an area which needs to be retried further.
1697 *
1698 * Return:
1699 *     The number of bytes to fail.
1700 *
1701 * Context:
1702 *     queue_lock must be held.
1703 */
1704unsigned int blk_rq_err_bytes(const struct request *rq)
1705{
1706        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1707        unsigned int bytes = 0;
1708        struct bio *bio;
1709
1710        if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1711                return blk_rq_bytes(rq);
1712
1713        /*
1714         * Currently the only 'mixing' which can happen is between
1715         * different failfast types.  We can safely fail portions
1716         * which have all the failfast bits that the first one has -
1717         * the ones which are at least as eager to fail as the first
1718         * one.
1719         */
1720        for (bio = rq->bio; bio; bio = bio->bi_next) {
1721                if ((bio->bi_rw & ff) != ff)
1722                        break;
1723                bytes += bio->bi_size;
1724        }
1725
1726        /* this could lead to infinite loop */
1727        BUG_ON(blk_rq_bytes(rq) && !bytes);
1728        return bytes;
1729}
1730EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1731
1732static void blk_account_io_completion(struct request *req, unsigned int bytes)
1733{
1734        if (blk_do_io_stat(req)) {
1735                const int rw = rq_data_dir(req);
1736                struct hd_struct *part;
1737                int cpu;
1738
1739                cpu = part_stat_lock();
1740                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1741                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1742                part_stat_unlock();
1743        }
1744}
1745
1746static void blk_account_io_done(struct request *req)
1747{
1748        /*
1749         * Account IO completion.  bar_rq isn't accounted as normal
1750         * IO on either queueing or completion.  Accounting the containing
1751         * request is enough.
1752         */
1753        if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
1754                unsigned long duration = jiffies - req->start_time;
1755                const int rw = rq_data_dir(req);
1756                struct hd_struct *part;
1757                int cpu;
1758
1759                cpu = part_stat_lock();
1760                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1761
1762                part_stat_inc(cpu, part, ios[rw]);
1763                part_stat_add(cpu, part, ticks[rw], duration);
1764                part_round_stats(cpu, part);
1765                part_dec_in_flight(part, rw);
1766
1767                part_stat_unlock();
1768        }
1769}
1770
1771/**
1772 * blk_peek_request - peek at the top of a request queue
1773 * @q: request queue to peek at
1774 *
1775 * Description:
1776 *     Return the request at the top of @q.  The returned request
1777 *     should be started using blk_start_request() before LLD starts
1778 *     processing it.
1779 *
1780 * Return:
1781 *     Pointer to the request at the top of @q if available.  Null
1782 *     otherwise.
1783 *
1784 * Context:
1785 *     queue_lock must be held.
1786 */
1787struct request *blk_peek_request(struct request_queue *q)
1788{
1789        struct request *rq;
1790        int ret;
1791
1792        while ((rq = __elv_next_request(q)) != NULL) {
1793                if (!(rq->cmd_flags & REQ_STARTED)) {
1794                        /*
1795                         * This is the first time the device driver
1796                         * sees this request (possibly after
1797                         * requeueing).  Notify IO scheduler.
1798                         */
1799                        if (blk_sorted_rq(rq))
1800                                elv_activate_rq(q, rq);
1801
1802                        /*
1803                         * Just mark the request as started even if we don't
1804                         * start it; a request that has been delayed should
1805                         * not be passed by new incoming requests
1806                         */
1807                        rq->cmd_flags |= REQ_STARTED;
1808                        trace_block_rq_issue(q, rq);
1809                }
1810
1811                if (!q->boundary_rq || q->boundary_rq == rq) {
1812                        q->end_sector = rq_end_sector(rq);
1813                        q->boundary_rq = NULL;
1814                }
1815
1816                if (rq->cmd_flags & REQ_DONTPREP)
1817                        break;
1818
1819                if (q->dma_drain_size && blk_rq_bytes(rq)) {
1820                        /*
1821                         * make sure space for the drain appears.  We
1822                         * know we can do this because max_hw_segments
1823                         * has been adjusted to be one fewer than the
1824                         * device can handle
1825                         */
1826                        rq->nr_phys_segments++;
1827                }
1828
1829                if (!q->prep_rq_fn)
1830                        break;
1831
1832                ret = q->prep_rq_fn(q, rq);
1833                if (ret == BLKPREP_OK) {
1834                        break;
1835                } else if (ret == BLKPREP_DEFER) {
1836                        /*
1837                         * the request may have been (partially) prepped.
1838                         * we need to keep this request in the front to
1839                         * avoid resource deadlock.  REQ_STARTED will
1840                         * prevent other fs requests from passing this one.
1841                         */
1842                        if (q->dma_drain_size && blk_rq_bytes(rq) &&
1843                            !(rq->cmd_flags & REQ_DONTPREP)) {
1844                                /*
1845                                 * remove the space for the drain we added
1846                                 * so that we don't add it again
1847                                 */
1848                                --rq->nr_phys_segments;
1849                        }
1850
1851                        rq = NULL;
1852                        break;
1853                } else if (ret == BLKPREP_KILL) {
1854                        rq->cmd_flags |= REQ_QUIET;
1855                        /*
1856                         * Mark this request as started so we don't trigger
1857                         * any debug logic in the end I/O path.
1858                         */
1859                        blk_start_request(rq);
1860                        __blk_end_request_all(rq, -EIO);
1861                } else {
1862                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1863                        break;
1864                }
1865        }
1866
1867        return rq;
1868}
1869EXPORT_SYMBOL(blk_peek_request);
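
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * request_fn for a hypothetical single-command device.  Peeking first leaves
 * a request at the head of the queue, unstarted, while the hardware is busy;
 * only when it can actually be issued is it dequeued with
 * blk_start_request().  The busy flag and the "program the hardware" step
 * are placeholders.
 */
static bool example_hw_busy;                    /* hypothetical busy state */

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (example_hw_busy)
                        break;                  /* leave rq queued, retry later */

                blk_start_request(rq);          /* dequeue + arm the timeout */
                example_hw_busy = true;
                /* ... program the hardware with rq here ... */
        }
}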
1870
1871void blk_dequeue_request(struct request *rq)
1872{
1873        struct request_queue *q = rq->q;
1874
1875        BUG_ON(list_empty(&rq->queuelist));
1876        BUG_ON(ELV_ON_HASH(rq));
1877
1878        list_del_init(&rq->queuelist);
1879
1880        /*
1881         * the time frame between a request being removed from the lists
1882         * and when it is freed is accounted as IO that is in progress on
1883         * the driver side.
1884         */
1885        if (blk_account_rq(rq)) {
1886                q->in_flight[rq_is_sync(rq)]++;
1887                set_io_start_time_ns(rq);
1888        }
1889}
1890
1891/**
1892 * blk_start_request - start request processing on the driver
1893 * @req: request to dequeue
1894 *
1895 * Description:
1896 *     Dequeue @req and start timeout timer on it.  This hands off the
1897 *     request to the driver.
1898 *
1899 *     Block internal functions which don't want to start the timer should
1900 *     call blk_dequeue_request().
1901 *
1902 * Context:
1903 *     queue_lock must be held.
1904 */
1905void blk_start_request(struct request *req)
1906{
1907        blk_dequeue_request(req);
1908
1909        /*
1910         * We are now handing the request to the hardware, initialize
1911         * resid_len to full count and add the timeout handler.
1912         */
1913        req->resid_len = blk_rq_bytes(req);
1914        if (unlikely(blk_bidi_rq(req)))
1915                req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
1916
1917        blk_add_timer(req);
1918}
1919EXPORT_SYMBOL(blk_start_request);
1920
1921/**
1922 * blk_fetch_request - fetch a request from a request queue
1923 * @q: request queue to fetch a request from
1924 *
1925 * Description:
1926 *     Return the request at the top of @q.  The request is started on
1927 *     return and LLD can start processing it immediately.
1928 *
1929 * Return:
1930 *     Pointer to the request at the top of @q if available.  Null
1931 *     otherwise.
1932 *
1933 * Context:
1934 *     queue_lock must be held.
1935 */
1936struct request *blk_fetch_request(struct request_queue *q)
1937{
1938        struct request *rq;
1939
1940        rq = blk_peek_request(q);
1941        if (rq)
1942                blk_start_request(rq);
1943        return rq;
1944}
1945EXPORT_SYMBOL(blk_fetch_request);
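
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * simplest request_fn shape, for a hypothetical memory-backed device that can
 * finish every request on the spot.  blk_fetch_request() both peeks and
 * starts the request; __blk_end_request_all() is used because request_fn is
 * called with the queue lock held.  The data transfer itself is omitted.
 */
static void example_mem_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                int err = -EIO;

                if (blk_fs_request(rq)) {
                        /* ... copy the request's data to/from the backing
                         * store here (omitted) ... */
                        err = 0;
                }
                __blk_end_request_all(rq, err);
        }
}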
1946
1947/**
1948 * blk_update_request - Special helper function for request stacking drivers
1949 * @req:      the request being processed
1950 * @error:    %0 for success, < %0 for error
1951 * @nr_bytes: number of bytes to complete @req
1952 *
1953 * Description:
1954 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
1955 *     the request structure even if @req doesn't have leftover.
1956 *     If @req has leftover, sets it up for the next range of segments.
1957 *
1958 *     This special helper function is only for request stacking drivers
1959 *     (e.g. request-based dm) so that they can handle partial completion.
1960 *     Actual device drivers should use blk_end_request instead.
1961 *
1962 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1963 *     %false return from this function.
1964 *
1965 * Return:
1966 *     %false - this request doesn't have any more data
1967 *     %true  - this request has more data
1968 **/
1969bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1970{
1971        int total_bytes, bio_nbytes, next_idx = 0;
1972        struct bio *bio;
1973
1974        if (!req->bio)
1975                return false;
1976
1977        trace_block_rq_complete(req->q, req);
1978
1979        /*
1980         * For fs requests, rq is just a carrier of independent bios
1981         * and each partial completion should be handled separately.
1982         * Reset per-request error on each partial completion.
1983         *
1984         * TODO: tj: This is too subtle.  It would be better to let
1985         * low level drivers do what they see fit.
1986         */
1987        if (blk_fs_request(req))
1988                req->errors = 0;
1989
1990        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1991                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1992                                req->rq_disk ? req->rq_disk->disk_name : "?",
1993                                (unsigned long long)blk_rq_pos(req));
1994        }
1995
1996        blk_account_io_completion(req, nr_bytes);
1997
1998        total_bytes = bio_nbytes = 0;
1999        while ((bio = req->bio) != NULL) {
2000                int nbytes;
2001
2002                if (nr_bytes >= bio->bi_size) {
2003                        req->bio = bio->bi_next;
2004                        nbytes = bio->bi_size;
2005                        req_bio_endio(req, bio, nbytes, error);
2006                        next_idx = 0;
2007                        bio_nbytes = 0;
2008                } else {
2009                        int idx = bio->bi_idx + next_idx;
2010
2011                        if (unlikely(idx >= bio->bi_vcnt)) {
2012                                blk_dump_rq_flags(req, "__end_that");
2013                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2014                                       __func__, idx, bio->bi_vcnt);
2015                                break;
2016                        }
2017
2018                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
2019                        BIO_BUG_ON(nbytes > bio->bi_size);
2020
2021                        /*
2022                         * not a complete bvec done
2023                         */
2024                        if (unlikely(nbytes > nr_bytes)) {
2025                                bio_nbytes += nr_bytes;
2026                                total_bytes += nr_bytes;
2027                                break;
2028                        }
2029
2030                        /*
2031                         * advance to the next vector
2032                         */
2033                        next_idx++;
2034                        bio_nbytes += nbytes;
2035                }
2036
2037                total_bytes += nbytes;
2038                nr_bytes -= nbytes;
2039
2040                bio = req->bio;
2041                if (bio) {
2042                        /*
2043                         * end more in this run, or just return 'not-done'
2044                         */
2045                        if (unlikely(nr_bytes <= 0))
2046                                break;
2047                }
2048        }
2049
2050        /*
2051         * completely done
2052         */
2053        if (!req->bio) {
2054                /*
2055                 * Reset counters so that the request stacking driver
2056                 * can find how many bytes remain in the request
2057                 * later.
2058                 */
2059                req->__data_len = 0;
2060                return false;
2061        }
2062
2063        /*
2064         * if the request wasn't completed, update state
2065         */
2066        if (bio_nbytes) {
2067                req_bio_endio(req, bio, bio_nbytes, error);
2068                bio->bi_idx += next_idx;
2069                bio_iovec(bio)->bv_offset += nr_bytes;
2070                bio_iovec(bio)->bv_len -= nr_bytes;
2071        }
2072
2073        req->__data_len -= total_bytes;
2074        req->buffer = bio_data(req->bio);
2075
2076        /* update sector only for requests with a clear definition of sector */
2077        if (blk_fs_request(req) || blk_discard_rq(req))
2078                req->__sector += total_bytes >> 9;
2079
2080        /* mixed attributes always follow the first bio */
2081        if (req->cmd_flags & REQ_MIXED_MERGE) {
2082                req->cmd_flags &= ~REQ_FAILFAST_MASK;
2083                req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2084        }
2085
2086        /*
2087         * If the total number of sectors is less than the first segment
2088         * size, something has gone terribly wrong.
2089         */
2090        if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2091                printk(KERN_ERR "blk: request botched\n");
2092                req->__data_len = blk_rq_cur_bytes(req);
2093        }
2094
2095        /* recalculate the number of segments */
2096        blk_recalc_rq_segments(req);
2097
2098        return true;
2099}
2100EXPORT_SYMBOL_GPL(blk_update_request);
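
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * request-stacking driver might propagate a clone's partial completion back
 * to the original request.  blk_update_request() advances the original's
 * bios and byte counts but never completes the request structure itself;
 * that final step is assumed to happen in the driver's own completion path
 * once all clones have finished.
 */
static bool example_clone_bytes_done(struct request *orig, int error,
                                     unsigned int nr_bytes)
{
        /*
         * A %false return means orig has no data left and the stacking
         * driver's final-completion path (not shown) may now finish the
         * request.
         */
        return blk_update_request(orig, error, nr_bytes);
}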
2101
2102static bool blk_update_bidi_request(struct request *rq, int error,
2103                                    unsigned int nr_bytes,
2104                                    unsigned int bidi_bytes)
2105{
2106        if (blk_update_request(rq, error, nr_bytes))
2107                return true;
2108
2109        /* Bidi request must be completed as a whole */
2110        if (unlikely(blk_bidi_rq(rq)) &&
2111            blk_update_request(rq->next_rq, error, bidi_bytes))
2112                return true;
2113
2114        add_disk_randomness(rq->rq_disk);
2115
2116        return false;
2117}
2118
2119/*
2120 * queue lock must be held
2121 */
2122static void blk_finish_request(struct request *req, int error)
2123{
2124        if (blk_rq_tagged(req))
2125                blk_queue_end_tag(req->q, req);
2126
2127        BUG_ON(blk_queued_rq(req));
2128
2129        if (unlikely(laptop_mode) && blk_fs_request(req))
2130                laptop_io_completion(&req->q->backing_dev_info);
2131
2132        blk_delete_timer(req);
2133
2134        blk_account_io_done(req);
2135
2136        if (req->end_io)
2137                req->end_io(req, error);
2138        else {
2139                if (blk_bidi_rq(req))
2140                        __blk_put_request(req->next_rq->q, req->next_rq);
2141
2142                __blk_put_request(req->q, req);
2143        }
2144}
2145
2146/**
2147 * blk_end_bidi_request - Complete a bidi request
2148 * @rq:         the request to complete
2149 * @error:      %0 for success, < %0 for error
2150 * @nr_bytes:   number of bytes to complete @rq
2151 * @bidi_bytes: number of bytes to complete @rq->next_rq
2152 *
2153 * Description:
2154 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2155 *     Drivers that support bidi can safely call this function for any
2156 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
2157 *     just ignored.
2158 *
2159 * Return:
2160 *     %false - we are done with this request
2161 *     %true  - still buffers pending for this request
2162 **/
2163static bool blk_end_bidi_request(struct request *rq, int error,
2164                                 unsigned int nr_bytes, unsigned int bidi_bytes)
2165{
2166        struct request_queue *q = rq->q;
2167        unsigned long flags;
2168
2169        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2170                return true;
2171
2172        spin_lock_irqsave(q->queue_lock, flags);
2173        blk_finish_request(rq, error);
2174        spin_unlock_irqrestore(q->queue_lock, flags);
2175
2176        return false;
2177}
2178
2179/**
2180 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2181 * @rq:         the request to complete
2182 * @error:      %0 for success, < %0 for error
2183 * @nr_bytes:   number of bytes to complete @rq
2184 * @bidi_bytes: number of bytes to complete @rq->next_rq
2185 *
2186 * Description:
2187 *     Identical to blk_end_bidi_request() except that queue lock is
2188 *     assumed to be locked on entry and remains so on return.
2189 *
2190 * Return:
2191 *     %false - we are done with this request
2192 *     %true  - still buffers pending for this request
2193 **/
2194static bool __blk_end_bidi_request(struct request *rq, int error,
2195                                   unsigned int nr_bytes, unsigned int bidi_bytes)
2196{
2197        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2198                return true;
2199
2200        blk_finish_request(rq, error);
2201
2202        return false;
2203}
2204
2205/**
2206 * blk_end_request - Helper function for drivers to complete the request.
2207 * @rq:       the request being processed
2208 * @error:    %0 for success, < %0 for error
2209 * @nr_bytes: number of bytes to complete
2210 *
2211 * Description:
2212 *     Ends I/O on a number of bytes attached to @rq.
2213 *     If @rq has leftover, sets it up for the next range of segments.
2214 *
2215 * Return:
2216 *     %false - we are done with this request
2217 *     %true  - still buffers pending for this request
2218 **/
2219bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2220{
2221        return blk_end_bidi_request(rq, error, nr_bytes, 0);
2222}
2223EXPORT_SYMBOL(blk_end_request);
2224
2225/**
2226 * blk_end_request_all - Helper function for drivers to finish the request.
2227 * @rq: the request to finish
2228 * @error: %0 for success, < %0 for error
2229 *
2230 * Description:
2231 *     Completely finish @rq.
2232 */
2233void blk_end_request_all(struct request *rq, int error)
2234{
2235        bool pending;
2236        unsigned int bidi_bytes = 0;
2237
2238        if (unlikely(blk_bidi_rq(rq)))
2239                bidi_bytes = blk_rq_bytes(rq->next_rq);
2240
2241        pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2242        BUG_ON(pending);
2243}
2244EXPORT_SYMBOL(blk_end_request_all);
2245
2246/**
2247 * blk_end_request_cur - Helper function to finish the current request chunk.
2248 * @rq: the request to finish the current chunk for
2249 * @error: %0 for success, < %0 for error
2250 *
2251 * Description:
2252 *     Complete the current consecutively mapped chunk from @rq.
2253 *
2254 * Return:
2255 *     %false - we are done with this request
2256 *     %true  - still buffers pending for this request
2257 */
2258bool blk_end_request_cur(struct request *rq, int error)
2259{
2260        return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2261}
2262EXPORT_SYMBOL(blk_end_request_cur);
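
/*
 * Usage sketch (illustrative only, not part of the original file):
 * chunk-at-a-time completion, e.g. from a hypothetical driver that raises an
 * interrupt per contiguous chunk.  rq->buffer and blk_rq_cur_bytes() describe
 * the current chunk; blk_end_request_cur() retires exactly that chunk and
 * reports whether more data remains.  "data" stands in for the device's
 * receive buffer.
 */
static bool example_complete_read_chunk(struct request *rq, const void *data)
{
        /* copy the just-transferred chunk into the request's current buffer */
        memcpy(rq->buffer, data, blk_rq_cur_bytes(rq));

        /* %true while further chunks remain in rq */
        return blk_end_request_cur(rq, 0);
}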
2263
2264/**
2265 * blk_end_request_err - Finish a request till the next failure boundary.
2266 * @rq: the request to finish till the next failure boundary for
2267 * @error: must be negative errno
2268 *
2269 * Description:
2270 *     Complete @rq till the next failure boundary.
2271 *
2272 * Return:
2273 *     %false - we are done with this request
2274 *     %true  - still buffers pending for this request
2275 */
2276bool blk_end_request_err(struct request *rq, int error)
2277{
2278        WARN_ON(error >= 0);
2279        return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2280}
2281EXPORT_SYMBOL_GPL(blk_end_request_err);
2282
2283/**
2284 * __blk_end_request - Helper function for drivers to complete the request.
2285 * @rq:       the request being processed
2286 * @error:    %0 for success, < %0 for error
2287 * @nr_bytes: number of bytes to complete
2288 *
2289 * Description:
2290 *     Must be called with queue lock held unlike blk_end_request().
2291 *
2292 * Return:
2293 *     %false - we are done with this request
2294 *     %true  - still buffers pending for this request
2295 **/
2296bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2297{
2298        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2299}
2300EXPORT_SYMBOL(__blk_end_request);
2301
2302/**
2303 * __blk_end_request_all - Helper function for drivers to finish the request.
2304 * @rq: the request to finish
2305 * @error: %0 for success, < %0 for error
2306 *
2307 * Description:
2308 *     Completely finish @rq.  Must be called with queue lock held.
2309 */
2310void __blk_end_request_all(struct request *rq, int error)
2311{
2312        bool pending;
2313        unsigned int bidi_bytes = 0;
2314
2315        if (unlikely(blk_bidi_rq(rq)))
2316                bidi_bytes = blk_rq_bytes(rq->next_rq);
2317
2318        pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2319        BUG_ON(pending);
2320}
2321EXPORT_SYMBOL(__blk_end_request_all);
2322
2323/**
2324 * __blk_end_request_cur - Helper function to finish the current request chunk.
2325 * @rq: the request to finish the current chunk for
2326 * @error: %0 for success, < %0 for error
2327 *
2328 * Description:
2329 *     Complete the current consecutively mapped chunk from @rq.  Must
2330 *     be called with queue lock held.
2331 *
2332 * Return:
2333 *     %false - we are done with this request
2334 *     %true  - still buffers pending for this request
2335 */
2336bool __blk_end_request_cur(struct request *rq, int error)
2337{
2338        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2339}
2340EXPORT_SYMBOL(__blk_end_request_cur);
2341
2342/**
2343 * __blk_end_request_err - Finish a request till the next failure boundary.
2344 * @rq: the request to finish till the next failure boundary for
2345 * @error: must be negative errno
2346 *
2347 * Description:
2348 *     Complete @rq till the next failure boundary.  Must be called
2349 *     with queue lock held.
2350 *
2351 * Return:
2352 *     %false - we are done with this request
2353 *     %true  - still buffers pending for this request
2354 */
2355bool __blk_end_request_err(struct request *rq, int error)
2356{
2357        WARN_ON(error >= 0);
2358        return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2359}
2360EXPORT_SYMBOL_GPL(__blk_end_request_err);
2361
2362void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2363                     struct bio *bio)
2364{
2365        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2366        rq->cmd_flags |= bio->bi_rw & REQ_RW;
2367
2368        if (bio_has_data(bio)) {
2369                rq->nr_phys_segments = bio_phys_segments(q, bio);
2370                rq->buffer = bio_data(bio);
2371        }
2372        rq->__data_len = bio->bi_size;
2373        rq->bio = rq->biotail = bio;
2374
2375        if (bio->bi_bdev)
2376                rq->rq_disk = bio->bi_bdev->bd_disk;
2377}
2378
2379#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2380/**
2381 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2382 * @rq: the request to be flushed
2383 *
2384 * Description:
2385 *     Flush all pages in @rq.
2386 */
2387void rq_flush_dcache_pages(struct request *rq)
2388{
2389        struct req_iterator iter;
2390        struct bio_vec *bvec;
2391
2392        rq_for_each_segment(bvec, rq, iter)
2393                flush_dcache_page(bvec->bv_page);
2394}
2395EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2396#endif
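
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that DMAs read data into request pages would typically flush them before
 * completing the request, so that user mappings see the new data on
 * architectures that need the flush (it is a no-op elsewhere).
 */
static void example_dma_read_done(struct request *rq, int error)
{
        if (rq_data_dir(rq) == READ)
                rq_flush_dcache_pages(rq);
        blk_end_request_all(rq, error);
}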
2397
2398/**
2399 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2400 * @q : the queue of the device being checked
2401 *
2402 * Description:
2403 *    Check if underlying low-level drivers of a device are busy.
2404 *    If the drivers want to export their busy state, they must set their own
2405 *    exporting function using blk_queue_lld_busy() first.
2406 *
2407 *    Basically, this function is used only by request stacking drivers
2408 *    to stop dispatching requests to underlying devices when underlying
2409 *    devices are busy.  This behavior allows more I/O merging on the queue
2410 *    of the request stacking driver and prevents I/O throughput regression
2411 *    on burst I/O load.
2412 *
2413 * Return:
2414 *    0 - Not busy (The request stacking driver should dispatch request)
2415 *    1 - Busy (The request stacking driver should stop dispatching request)
2416 */
2417int blk_lld_busy(struct request_queue *q)
2418{
2419        if (q->lld_busy_fn)
2420                return q->lld_busy_fn(q);
2421
2422        return 0;
2423}
2424EXPORT_SYMBOL_GPL(blk_lld_busy);
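
/*
 * Usage sketch (illustrative only, not part of the original file): the two
 * halves of the busy protocol.  A low-level driver registers its own notion
 * of "busy" with blk_queue_lld_busy(q, example_lld_busy) at init time; a
 * stacking driver then consults blk_lld_busy() on the underlying queue before
 * dispatching.  Treating any in-flight request as "busy" is just one possible
 * policy, chosen here for brevity.
 */
static int example_lld_busy(struct request_queue *q)
{
        /* busy while any request is already out at the hardware */
        return q->in_flight[0] + q->in_flight[1] > 0;
}

static int example_should_defer_dispatch(struct request_queue *below_q)
{
        return blk_lld_busy(below_q);
}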
2425
2426/**
2427 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2428 * @rq: the clone request to be cleaned up
2429 *
2430 * Description:
2431 *     Free all bios in @rq for a cloned request.
2432 */
2433void blk_rq_unprep_clone(struct request *rq)
2434{
2435        struct bio *bio;
2436
2437        while ((bio = rq->bio) != NULL) {
2438                rq->bio = bio->bi_next;
2439
2440                bio_put(bio);
2441        }
2442}
2443EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2444
2445/*
2446 * Copy attributes of the original request to the clone request.
2447 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2448 */
2449static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2450{
2451        dst->cpu = src->cpu;
2452        dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
2453        dst->cmd_type = src->cmd_type;
2454        dst->__sector = blk_rq_pos(src);
2455        dst->__data_len = blk_rq_bytes(src);
2456        dst->nr_phys_segments = src->nr_phys_segments;
2457        dst->ioprio = src->ioprio;
2458        dst->extra_len = src->extra_len;
2459}
2460
2461/**
2462 * blk_rq_prep_clone - Helper function to setup clone request
2463 * @rq: the request to be setup
2464 * @rq_src: original request to be cloned
2465 * @bs: bio_set that bios for clone are allocated from
2466 * @gfp_mask: memory allocation mask for bio
2467 * @bio_ctr: setup function to be called for each clone bio.
2468 *           Returns %0 for success, non %0 for failure.
2469 * @data: private data to be passed to @bio_ctr
2470 *
2471 * Description:
2472 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2473 *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2474 *     are not copied, and copying such parts is the caller's responsibility.
2475 *     Also, pages which the original bios are pointing to are not copied
2476 *     and the cloned bios just point same pages.
2477 *     and the cloned bios just point to the same pages.
2478 *     the caller must complete @rq before @rq_src.
2479 */
2480int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2481                      struct bio_set *bs, gfp_t gfp_mask,
2482                      int (*bio_ctr)(struct bio *, struct bio *, void *),
2483                      void *data)
2484{
2485        struct bio *bio, *bio_src;
2486
2487        if (!bs)
2488                bs = fs_bio_set;
2489
2490        blk_rq_init(NULL, rq);
2491
2492        __rq_for_each_bio(bio_src, rq_src) {
2493                bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2494                if (!bio)
2495                        goto free_and_out;
2496
2497                __bio_clone(bio, bio_src);
2498
2499                if (bio_integrity(bio_src) &&
2500                    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2501                        goto free_and_out;
2502
2503                if (bio_ctr && bio_ctr(bio, bio_src, data))
2504                        goto free_and_out;
2505
2506                if (rq->bio) {
2507                        rq->biotail->bi_next = bio;
2508                        rq->biotail = bio;
2509                } else
2510                        rq->bio = rq->biotail = bio;
2511        }
2512
2513        __blk_rq_prep_clone(rq, rq_src);
2514
2515        return 0;
2516
2517free_and_out:
2518        if (bio)
2519                bio_free(bio, bs);
2520        blk_rq_unprep_clone(rq);
2521
2522        return -ENOMEM;
2523}
2524EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
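
/*
 * Usage sketch (illustrative only, not part of the original file): cloning a
 * request and dispatching the clone to an underlying queue, in the style of
 * request-based dm.  Allocation of the clone itself (dm uses a mempool) and
 * the clone's end_io handling are not shown; the bio_ctr callback here only
 * stashes a per-request context pointer.
 */
static int example_bio_ctr(struct bio *clone_bio, struct bio *orig_bio,
                           void *data)
{
        clone_bio->bi_private = data;           /* e.g. driver context */
        return 0;
}

static int example_clone_and_dispatch(struct request *orig,
                                      struct request *clone,
                                      struct request_queue *below_q,
                                      void *ctx)
{
        int ret;

        ret = blk_rq_prep_clone(clone, orig, NULL /* use fs_bio_set */,
                                GFP_ATOMIC, example_bio_ctr, ctx);
        if (ret)
                return ret;

        ret = blk_insert_cloned_request(below_q, clone);
        if (ret)
                blk_rq_unprep_clone(clone);     /* free the cloned bios */

        return ret;
}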
2525
2526int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2527{
2528        return queue_work(kblockd_workqueue, work);
2529}
2530EXPORT_SYMBOL(kblockd_schedule_work);
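
/*
 * Usage sketch (illustrative only, not part of the original file): deferring
 * queue handling to kblockd.  A driver that cannot restart its queue from the
 * current context (for instance from hard-irq) can bounce the work through
 * kblockd_schedule_work().  The work item and its handler are placeholders.
 */
static void example_requeue_work_fn(struct work_struct *work)
{
        /* back in process context: take the queue lock, kick the queue, ... */
}

static void example_defer_queue_kick(struct request_queue *q,
                                     struct work_struct *work)
{
        INIT_WORK(work, example_requeue_work_fn);
        kblockd_schedule_work(q, work);
}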
2531
2532int __init blk_dev_init(void)
2533{
2534        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2535                        sizeof(((struct request *)0)->cmd_flags));
2536
2537        kblockd_workqueue = create_workqueue("kblockd");
2538        if (!kblockd_workqueue)
2539                panic("Failed to create kblockd\n");
2540
2541        request_cachep = kmem_cache_create("blkdev_requests",
2542                        sizeof(struct request), 0, SLAB_PANIC, NULL);
2543
2544        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2545                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2546
2547        return 0;
2548}
2549